code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import argparse
import os
import cv2
import numpy as np
def otsu_threshold(image):
    """Compute Otsu's binarization threshold for an 8-bit grayscale image.

    Parameters
    ----------
    image : numpy.ndarray
        Grayscale image with intensities in [0, 256).

    Returns
    -------
    int or None
        Intensity threshold maximizing the between-class variance, or None
        when no valid split exists (e.g. a constant image).
    """
    # Histogram over the full 8-bit range; bin centers equal intensities 0..255.
    hist, edges = np.histogram(image.flatten(), 256, [0, 256])
    # Last edge is redundant (right boundary of the final bin).
    edges = edges[:-1]
    # Precalculate cumulative pixel counts and cumulative intensity mass.
    cdf = np.cumsum(hist)
    area = np.cumsum(hist * edges)
    # Placeholders for best split
    max_sigma = None
    threshold = None
    # Last threshold value is omitted (it would leave the second class empty).
    for t in range(len(edges) - 1):
        # Parameters of current threshold
        alpha = area[t]
        beta = cdf[t]
        # Skip degenerate splits: one class empty makes the variance undefined
        # (the original code divided by zero here, yielding NaN/inf sigma).
        if beta == 0 or beta == cdf[-1]:
            continue
        # Probability of the first class
        p = beta / cdf[-1]
        # Difference of class means; sigma is the between-class variance.
        a = alpha / beta - (area[-1] - alpha) / (cdf[-1] - beta)
        sigma = p * (1 - p) * a * a
        # Choose threshold with biggest variance. Compare against None
        # explicitly: `not max_sigma` would wrongly reset on sigma == 0.0.
        if max_sigma is None or sigma > max_sigma:
            max_sigma = sigma
            threshold = t
    return threshold


def otsu(source_path, destination_path):
    """Binarize the image at source_path with Otsu's method and save it.

    Parameters
    ----------
    source_path : str
        Path to the original image (read as grayscale).
    destination_path : str
        Path where the binarized image is written.
    """
    # Load image into memory. Convert it to grayscale
    image = cv2.imread(source_path, cv2.IMREAD_GRAYSCALE)
    threshold = otsu_threshold(image)
    # Binarize image in place: dark class to 0, bright class to 255.
    image[image <= threshold] = 0
    image[image > threshold] = 255
    cv2.imwrite(destination_path, image.astype(np.uint8))
if __name__ == '__main__':
    # Command-line entry point: otsu.py <source_path> <destination_path>
    parser = argparse.ArgumentParser(description='Perform otsu binarization.')
    parser.add_argument('source_path', metavar='source_path', type=str,
                        help='Path to the original image.')
    parser.add_argument('destination_path', metavar='destination_path', type=str,
                        help='Path to the processed image.')
    args = parser.parse_args()
    if not os.path.exists(args.source_path):
        # Include the missing path in the exception so the user can see
        # exactly what could not be found (was a bare FileNotFoundError).
        raise FileNotFoundError(args.source_path)
    otsu(args.source_path, args.destination_path)
| [
"cv2.imread",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.cumsum"
] | [((166, 211), 'cv2.imread', 'cv2.imread', (['source_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(source_path, cv2.IMREAD_GRAYSCALE)\n', (176, 211), False, 'import cv2\n'), ((441, 456), 'numpy.cumsum', 'np.cumsum', (['hist'], {}), '(hist)\n', (450, 456), True, 'import numpy as np\n'), ((468, 491), 'numpy.cumsum', 'np.cumsum', (['(hist * edges)'], {}), '(hist * edges)\n', (477, 491), True, 'import numpy as np\n'), ((1288, 1353), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform otsu binarization."""'}), "(description='Perform otsu binarization.')\n", (1311, 1353), False, 'import argparse\n'), ((1673, 1705), 'os.path.exists', 'os.path.exists', (['args.source_path'], {}), '(args.source_path)\n', (1687, 1705), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
import torch
from model.generator import Generator
from model.discriminator import Discriminator
from loss.WGANGP import PG_Gradient_Penalty
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.transforms import Compose, ToTensor
from os import getcwd
from numpy import array, log2, interp, linspace
from numpy.random import randn
from time import time, sleep
import matplotlib.gridspec as gridspec
# Hyper-parameter / training configuration for the progressive GAN.
config = {'channels':[128,128,128,128,128,128,128], # must be len(config['sr']) + 1
          'latent_size':128,
          'sr':[4, 8, 16, 32, 64, 128], # spatial resolution
          'start_sr':32,
          # Per-level settings: batch size and epochs before growing resolution.
          'level_batch_size':[256, 256, 256, 128, 64, 16],
          'epochs_before_jump':[16, 15, 15, 15, 15, 15],
          'learning_rate_generator':0.1,
          'learning_rate_critic':0.1,
          # Adam betas for generator and critic optimizers.
          'generator_betas':(0.0, 0.99),
          'critic_betas':(0.0, 0.99),
          # WGAN-GP settings: critic steps per generator step, gradient-penalty
          # coefficient and drift-penalty coefficient.
          'ncrit':1,
          'critic_lambda':10.,
          'epsilon_drift':0.001,
          'dataset_dir':'/home/deniz/Desktop/data_set/CelebAMask-HQ/',
          'stat_format':'epoch {:4d} resolution {:4d} critic_loss {:6.4f} generator_loss {:6.4f} time {:6f}'}
# Index into config['sr']: which progressive-growing level to visualize.
level_index = 4
device = torch.device('cuda:0')  # NOTE(review): hard-coded GPU device — fails on CPU-only machines
def show_img(d):
    """Display a batch of images in a 10-column grid for ten seconds.

    Parameters
    ----------
    d : array-like
        Batch of images indexed along axis 0; each entry is passed to
        ``imshow`` (assumes (H, W) or (H, W, C) per image — confirm caller).
    """
    plt.clf()
    cols = 10
    count = d.shape[0]
    # Figure size comes from the module-level resolution configuration.
    fig = plt.figure(figsize=(config['sr'][level_index], config['sr'][level_index]))
    rows = int(count / cols)
    # One subplot per image, laid out rows-first.
    axes = [plt.subplot(rows + 1, cols, k) for k in range(1, count + 1)]
    for idx, cell in enumerate(axes):
        plt.axis('on')
        cell.imshow(d[idx, :, :], cmap='gray')
        # Hide tick labels so the grid reads as a contact sheet.
        cell.set_xticklabels([])
        cell.set_yticklabels([])
        cell.set_aspect('equal')
    fig.subplots_adjust(hspace=0, wspace=0.1)
    fig.tight_layout()
    # Show non-blocking, keep the window up for 10 seconds, then close.
    plt.show(block=False)
    plt.pause(10)
    plt.close()
    return
# Build a generator at the chosen resolution and sample 20 latent vectors.
generator = Generator(config['sr'][level_index], config, transition=True, save_checkpoint=False).to(device)
x = torch.randn(20, config['latent_size']).to(device)
a = generator(x) # .reshape(3, config['sr'][level_index], config['sr'][level_index])
# Convert the generated tensor to an integer numpy array and move it to
# NHWC layout for display.
image = array((a).tolist()).astype(int)
image = np.transpose(image, (0,2,3,1))
show_img(image)
'''
# uncomment this for checking the progress of the network
while True:
generator = Generator(config['sr'][level_index], config, transition=False, transition_coef=0.8, save_checkpoint=False).to(device)
x1 = randn(config['latent_size'])
x2 = randn(config['latent_size'])
alpha = linspace(0.,1.,40)
d = []
for i in alpha:
d.append(x1 * i + x2 * (1-i))
kk = torch.Tensor(array(d)).to(device)
a = generator(kk) # .reshape(3, config['sr'][level_index], config['sr'][level_index])
image = array((a).tolist()).astype(int)
image = np.transpose(image, (0,2,3,1))
show_img(image)
print()
''' | [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"numpy.transpose",
"matplotlib.pyplot.axis",
"torch.randn",
"matplotlib.pyplot.figure",
"model.generator.Generator",
"torch.device",
"matplotlib.pyplot.pause"
] | [((1274, 1296), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1286, 1296), False, 'import torch\n'), ((2170, 2203), 'numpy.transpose', 'np.transpose', (['image', '(0, 2, 3, 1)'], {}), '(image, (0, 2, 3, 1))\n', (2182, 2203), True, 'import numpy as np\n'), ((1320, 1329), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1327, 1329), True, 'import matplotlib.pyplot as plt\n'), ((1370, 1444), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': "(config['sr'][level_index], config['sr'][level_index])"}), "(figsize=(config['sr'][level_index], config['sr'][level_index]))\n", (1380, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1800, 1821), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1808, 1821), True, 'import matplotlib.pyplot as plt\n'), ((1830, 1843), 'matplotlib.pyplot.pause', 'plt.pause', (['(10)'], {}), '(10)\n', (1839, 1843), True, 'import matplotlib.pyplot as plt\n'), ((1848, 1859), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1857, 1859), True, 'import matplotlib.pyplot as plt\n'), ((1474, 1498), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(m + 1)', 'h', 'i'], {}), '(m + 1, h, i)\n', (1485, 1498), True, 'import matplotlib.pyplot as plt\n'), ((1555, 1569), 'matplotlib.pyplot.axis', 'plt.axis', (['"""on"""'], {}), "('on')\n", (1563, 1569), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1974), 'model.generator.Generator', 'Generator', (["config['sr'][level_index]", 'config'], {'transition': '(True)', 'save_checkpoint': '(False)'}), "(config['sr'][level_index], config, transition=True,\n save_checkpoint=False)\n", (1895, 1974), False, 'from model.generator import Generator\n'), ((1987, 2025), 'torch.randn', 'torch.randn', (['(20)', "config['latent_size']"], {}), "(20, config['latent_size'])\n", (1998, 2025), False, 'import torch\n')] |
__all__ = ['extract_multiedge', 'summarize_multigraph']
import sys
import traceback
import warnings
from functools import partial
from multiprocessing import Manager, Pool
from os.path import getsize, isfile
from sys import version_info
import hiwenet
import networkx as nx
import numpy as np
# graynet only supports Python 3; fail fast with a clear error on Python 2.
if version_info.major > 2:
    from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, \
        save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, \
        check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, \
        stamp_expt_weight, import_features, save_per_subject_graph
    from graynet import parcellate
    from graynet import config_graynet as cfg
else:
    raise NotImplementedError(
        'graynet supports only Python 2.7 or 3+. Upgrade to Python 3+ is recommended.')
def extract_multiedge(subject_id_list,
                      input_dir,
                      base_feature_list=cfg.default_features_multi_edge,
                      weight_method_list=cfg.default_weight_method,
                      summary_stats=cfg.multi_edge_summary_func_default,
                      num_bins=cfg.default_num_bins,
                      edge_range_dict=cfg.edge_range_predefined,
                      atlas=cfg.default_atlas,
                      smoothing_param=cfg.default_smoothing_param,
                      node_size=cfg.default_node_size,
                      out_dir=None,
                      return_results=False,
                      overwrite_results=False,
                      num_procs=cfg.default_num_procs):
    """
    Extracts weighted networks (matrix of pair-wise ROI distances) based on multiple gray matter features based on Freesurfer processing.

    Parameters
    ----------
    subject_id_list : str or list
        must be path to a file containing subject IDs, or a list of subject IDs
    input_dir : str
        Path to the input directory where features can be read.
        For example, this can be Freesurfer's SUBJECTS_DIR, where output processing is stored.
        Or another directory with a structure that graynet can parse.
    base_feature_list : list
        Set of features that drive the different edges between the pair of ROIs.
        For example, if you choose thickness and pial_curv, each pair of ROIs will have two edges.
        This multi-edge network can be turned into a single network based on averaging weights from different individual networks.
    weight_method_list : string(s), optional
        Type of distance (or metric) to compute between the pair of histograms.
        It must be one of the following methods:
        - 'chebyshev'
        - 'chebyshev_neg'
        - 'chi_square'
        - 'correlate'
        - 'correlate_1'
        - 'cosine'
        - 'cosine_1'
        - 'cosine_2'
        - 'cosine_alt'
        - 'euclidean'
        - 'fidelity_based'
        - 'histogram_intersection'
        - 'histogram_intersection_1'
        - 'jensen_shannon'
        - 'kullback_leibler'
        - 'manhattan'
        - 'minowski'
        - 'noelle_1'
        - 'noelle_2'
        - 'noelle_3'
        - 'noelle_4'
        - 'noelle_5'
        - 'relative_bin_deviation'
        - 'relative_deviation'

        Note only the following are *metrics*:
        - 'manhattan'
        - 'minowski'
        - 'euclidean'
        - 'noelle_2'
        - 'noelle_4'
        - 'noelle_5'

        The following are *semi- or quasi-metrics*:
        - 'kullback_leibler'
        - 'jensen_shannon'
        - 'chi_square'
        - 'chebyshev'
        - 'cosine_1'
        - 'chebyshev_neg'
        - 'correlate_1'
        - 'histogram_intersection_1'
        - 'relative_deviation'
        - 'relative_bin_deviation'
        - 'noelle_1'
        - 'noelle_3'

        The following are classified to be similarity functions:
        - 'histogram_intersection'
        - 'correlate'
        - 'cosine'
        - 'cosine_2'
        - 'cosine_alt'
        - 'fidelity_based'

        *Default* choice: 'manhattan'.
    summary_stats : list of str
        A string, or list of strings, each representing a method (like 'median', 'prod' or 'max'),
        to compute a summary statistic from the array of multiple weights computed.
        This must be available as a member of numpy or scipy.stats.
    num_bins : int
        Number of histogram bins to use when computing pair-wise weights based on histogram distance. Default : 25
    edge_range_dict : tuple or list
        The range of edges (two finite values) within which to build the histogram e.g. ``--edge_range 0 5``.
        This can be helpful (and important) to ensure correspondence across multiple invocations of graynet (e.g. for different subjects), in terms of range across all bins as well as individual bin edges.

        Default :
        - ( 0.0, 5.0) for ``freesurfer_thickness`` and
        - (-0.3, 0.3) for ``freesurfer_curv``.
    atlas : str
        Name of the atlas whose parcellation to be used.
        Choices for cortical parcellation: ['fsaverage', 'glasser2016'], which are primary cortical.
        Volumetric whole-brain atlases will be added soon.
    smoothing_param : scalar
        Smoothing parameter, which could be fwhm for Freesurfer cortical features,
        or another relevant for the chosen base_feature_list.
        Default: assumed as fwhm=10mm for the default feature choice 'thickness'
    node_size : scalar, optional
        Parameter to indicate the size of the ROIs, subparcels or patches, depending on type of atlas or feature.
        This feature is not implemented yet, just a placeholder and to enable default computation.
    out_dir : str, optional
        Path to output directory to store results.
        Default: None, results are returned, but not saved to disk.
        If this is None, return_results must be true.
    return_results : bool
        Flag to indicate whether to return the results to be returned.
        This flag helps to reduce the memory requirements, when the number of nodes in a parcellation or
        the number of subjects or weight methods are large, as it doesn't retain results for all combinations,
        when running from command line interface (or HPC). Default: False
        If this is False, out_dir must be specified to save the results to disk.
    overwrite_results : bool
        Flag to request overwriting of existing results, in case of reruns/failed jobs. By default, if the expected output file exists and is of non-zero size, its computation is skipped (assuming the file is complete, usable and not corrupted).
    num_procs : int
        Number of parallel processes to use to speed up computation.

    Returns
    -------
    edge_weights_all : dict, None
        If return_results is True, this will be a dictionary keyed in by a tuple: (weight method, subject_ID)
        The value of each edge_weights_all[(weight method, subject_ID)] is
        a numpy array of length p = k*(k-1)/2, with k = number of nodes in the atlas parcellation.
        If return_results is False, this will be None, which is the default.
    """

    # volumetric version is not fully tested yet!
    for feat in base_feature_list:
        if feat in cfg.features_volumetric:
            raise NotImplementedError('MultiEdge networks are not yet supported '
                                      'for volumetric features! '
                                      'They are under development. Stay tuned.')

    # All the checks must happen here, as this is key function in the API.
    # Note: several parameters are re-bound to their validated/normalized forms.
    check_params_multiedge(base_feature_list, input_dir, atlas, smoothing_param,
                           node_size, out_dir, return_results)
    atlas, atlas_name = check_atlas(atlas)

    subject_id_list, num_subjects, max_id_width, nd_id = check_subjects(subject_id_list)

    num_bins = check_num_bins(num_bins)
    edge_range_dict = check_edge_range_dict(edge_range_dict, base_feature_list)

    weight_method_list, num_weights, max_wtname_width, nd_wm = check_weights(
        weight_method_list)

    # validating the choice and getting a callable
    summary_stats, summary_stat_names, _, _, _ = check_stat_methods(summary_stats)

    num_procs = check_num_procs(num_procs)

    # Field widths etc. for aligned progress messages in the per-subject worker.
    pretty_print_options = (max_id_width, nd_id, num_weights, max_wtname_width, nd_wm)

    # roi_labels, ctx_annot = parcellate.freesurfer_roi_labels(atlas)
    # uniq_rois, roi_size, num_nodes = roi_info(roi_labels)
    uniq_rois, centroids, roi_labels = parcellate.roi_labels_centroids(atlas)

    print('\nProcessing {} features resampled to {} atlas,'
          ' smoothed at {} with node size {}'.format(base_feature_list, atlas_name,
                                                     smoothing_param, node_size))

    if not return_results:
        if out_dir is None:
            raise ValueError('When return_results=False, '
                             'out_dir must be specified '
                             'to be able to save the results.')
        # NOTE(review): out_dir is used via .exists()/.mkdir(), i.e. as a
        # pathlib.Path, although the docstring says str — confirm callers pass a Path.
        if not out_dir.exists():
            out_dir.mkdir(exist_ok=True, parents=True)

    # Bind all shared arguments; only `subject` (last parameter) remains free,
    # so the partial can be mapped over the subject list.
    partial_func_extract = partial(per_subject_multi_edge, input_dir, base_feature_list,
                                   roi_labels, centroids,
                                   weight_method_list, summary_stats, summary_stat_names,
                                   atlas, atlas_name, smoothing_param, node_size,
                                   num_bins, edge_range_dict,
                                   out_dir, return_results, overwrite_results, pretty_print_options)
    if num_procs > 1:
        # Third positional argument of Pool.map is chunksize: how many
        # subjects each worker pulls at a time.
        chunk_size = int(np.ceil(num_subjects / num_procs))
        with Manager():
            with Pool(processes=num_procs) as pool:
                edge_weights_list_dicts = pool.map(partial_func_extract, subject_id_list,
                                                   chunk_size)
    else:
        # reverting to sequential processing
        edge_weights_list_dicts = [partial_func_extract(subject=sub_id) for sub_id in
                                   subject_id_list]

    if return_results:
        edge_weights_all = dict()
        for combo in edge_weights_list_dicts:
            # each element from output of parallel loop is a dict keyed in by (subject, weight)
            edge_weights_all.update(combo)
    else:
        edge_weights_all = None

    print('\ngraynet computation done.')
    return edge_weights_all
def per_subject_multi_edge(input_dir, base_feature_list, roi_labels, centroids,
                           weight_method_list, summary_stats, summary_stat_names,
                           atlas, atlas_name, smoothing_param, node_size,
                           num_bins, edge_range_dict,
                           out_dir, return_results, overwrite_results, pretty_print_options,
                           subject=None):  # purposefully leaving it last to enable partial function creation
    """
    Extracts the given set of weights for one subject.

    For each weight method, builds one unigraph per base feature, stores them
    all as parallel edges of a single nx.MultiGraph, saves that multigraph,
    and then saves one summary graph per requested summary statistic.

    Returns a dict keyed by (weight_method, base_feature, subject) when
    return_results is True, otherwise None.
    """

    if subject is None:
        return

    if return_results:
        edge_weights_all = dict()
    else:
        edge_weights_all = None

    # Unpack formatting widths for the aligned progress messages below.
    max_id_width, nd_id, num_weights, max_wtname_width, nd_wm = pretty_print_options

    for ww, weight_method in enumerate(weight_method_list):
        # unique stamp for this combination of features/atlas/weight method
        expt_id_multi = stamp_expt_multiedge(base_feature_list, atlas_name,
                                             smoothing_param, node_size,
                                             weight_method)
        out_path_multigraph = make_output_path_graph(out_dir, subject,
                                                      [expt_id_multi, 'multigraph'])
        # skipping the computation if the file exists already
        # (non-zero size is taken as "complete and usable")
        if not overwrite_results and isfile(out_path_multigraph) and getsize(
                out_path_multigraph) > 0:
            print('\nMultigraph exists already at\n\t{}\n'
                  ' skipping its computation!'.format(out_path_multigraph))
            multigraph = None  # signal to re-read
        else:
            multigraph = nx.MultiGraph()

            for base_feature in base_feature_list:
                # # TODO refactor
                # unigraph, weight_vec = compute_unigraph(input_dir, subject, base_feature, weight_method, roi_labels,
                #                                         atlas, smoothing_param, node_size, centroids,
                #                                         num_bins, edge_range_dict,
                #                                         out_dir, overwrite_results, pretty_print_options)
                # if return_results:
                #     edge_weights_all[(weight_method, base_feature, subject)] = weight_vec
                try:
                    features = import_features(input_dir,
                                               [subject, ],
                                               base_feature,
                                               fwhm=smoothing_param,
                                               atlas=atlas)
                except:
                    traceback.print_exc()
                    warnings.warn('Unable to read {} features'
                                  ' for {}\n Skipping it.'.format(base_feature, subject),
                                  UserWarning)
                    # NOTE(review): despite the "Skipping it" message, this
                    # `return` abandons ALL remaining features and weight
                    # methods for this subject (returning None) — confirm intended.
                    return

                # Drop background/null-label vertices; keep data & ROI ids aligned.
                data, rois = mask_background_roi(features[subject], roi_labels,
                                                 cfg.null_roi_name)

                # unique stamp for each subject and weight
                expt_id_single = stamp_expt_weight(base_feature, atlas_name,
                                                  smoothing_param, node_size,
                                                  weight_method)
                sys.stdout.write('\nProcessing id {:{id_width}} --'
                                 ' weight {:{wtname_width}} ({:{nd_wm}}/{:{nd_wm}})'
                                 ' :'.format(subject, weight_method, ww + 1, num_weights,
                                             nd_id=nd_id, nd_wm=nd_wm, id_width=max_id_width,
                                             wtname_width=max_wtname_width))

                # actual computation of pair-wise features
                try:
                    unigraph = hiwenet.extract(data,
                                               rois,
                                               weight_method=weight_method,
                                               num_bins=num_bins,
                                               edge_range=edge_range_dict[base_feature],
                                               return_networkx_graph=True)

                    # retrieving edge weights
                    weight_vec = np.array(list(nx.get_edge_attributes(unigraph, 'weight').values()))
                    warn_nan(weight_vec)
                    if return_results:
                        edge_weights_all[(weight_method, base_feature, subject)] = weight_vec
                except (RuntimeError, RuntimeWarning) as runexc:
                    # Non-fatal numerical issues: report and continue with the
                    # next feature (unigraph from a previous iteration may be reused below).
                    print(runexc)
                except KeyboardInterrupt:
                    print('Exiting on keyborad interrupt! \n'
                          'Abandoning the remaining processing ')
                    sys.exit(1)
                except:
                    print('Unable to extract {} weights for {} for {}'.format(weight_method,
                                                                              base_feature,
                                                                              subject))
                    traceback.print_exc()

                print('Done.')

                # TODO consider extracting some network features upon user request.

                add_nodal_positions(unigraph, centroids)
                save_per_subject_graph(unigraph, out_dir, subject, expt_id_single)

                # adding edges/weights from each feature to a multigraph
                # this also encodes the sources
                for u, v in unigraph.edges():
                    multigraph.add_edge(u, v,
                                        weight=unigraph[u][v]['weight'],
                                        base_feature=base_feature)

            # adding position info to nodes (for visualization later)
            add_nodal_positions(multigraph, centroids)
            save_graph(multigraph, out_path_multigraph, 'multi-edge')

        for stat_func, stat_name in zip(summary_stats, summary_stat_names):
            # creating single graph with a summary edge weight (like median)
            out_path_summary = make_output_path_graph(out_dir, subject,
                                                       [expt_id_multi, stat_name, 'multigraph'])
            if not overwrite_results and isfile(out_path_summary) and getsize(out_path_summary) > 0:
                print(
                    'Summary {} of multigraph exists already at\n\t{}\n skipping its computation!'.format(
                        stat_name, out_path_summary))
            else:
                if multigraph is None:
                    # multigraph was skipped above because it already exists on
                    # disk; re-read it to compute the summary graphs.
                    multigraph = nx.read_graphml(out_path_multigraph)
                try:
                    summary_multigraph = summarize_multigraph(multigraph, stat_func)
                    add_nodal_positions(summary_multigraph, centroids)
                    save_graph(summary_multigraph, out_path_summary, '{} summary'.format(stat_name))
                except:
                    print('Summary {} could not be computed - skipping!'.format(stat_name))
                    traceback.print_exc()

    return edge_weights_all
def summarize_multigraph(multigraph, func_summary):
    """Collapse a multigraph into a simple graph with one summary weight per edge.

    For every pair of connected nodes, the weights of all parallel edges are
    gathered and reduced with *func_summary* (e.g. median); the result becomes
    the single 'weight' attribute of that edge in the returned nx.Graph.
    """
    collapsed = nx.Graph()
    for node_a, node_b in multigraph.edges():
        # Gather the weights of every parallel edge between this node pair.
        parallel_edges = multigraph[node_a][node_b]
        stacked_weights = np.array([attrs['weight'] for attrs in parallel_edges.values()])
        # float() needed due to graphml serialization limitation
        reduced = float(func_summary(stacked_weights))
        collapsed.add_edge(node_a, node_b, weight=reduced)
    return collapsed
def add_nodal_positions(graph, centroids):
    """Attach x/y/z coordinate attributes to every node in *graph*.

    centroids maps each ROI id to an indexable triple; the coordinates are
    stored as floats so they survive graphml export (for visualization later).
    """
    for roi in centroids:
        position = centroids[roi]
        node_attrs = graph.nodes[roi]
        node_attrs['x'] = float(position[0])
        node_attrs['y'] = float(position[1])
        node_attrs['z'] = float(position[2])
    return
def save_summary_graph(graph, out_dir, subject,
                       str_suffix=None,
                       summary_descr='summary'):
    """Persist a summary multi-graph as GraphML under out_dir/<subject>/.

    No-op when out_dir is None. Write failures are reported (with traceback)
    rather than raised.
    """
    if out_dir is None:
        return

    # get outpath returned from hiwenet, based on dist name and all other parameters
    # choose out_dir name based on dist name and all other parameters
    subject_dir = out_dir.joinpath(subject)
    if not subject_dir.exists():
        subject_dir.mkdir(exist_ok=True, parents=True)

    if str_suffix is None:
        file_name = '_{}_multigraph_graynet.graphml'.format(summary_descr)
    else:
        file_name = '{}_{}_multigraph_graynet.graphml'.format(str_suffix, summary_descr)

    graph_path = subject_dir / file_name
    try:
        # NOTE(review): nx.info() was removed in networkx >= 3.0 — confirm
        # the pinned networkx version still provides it.
        nx.info(graph)
        nx.write_graphml(graph, graph_path, encoding='utf-8')
        print('\nSaved the summary multi-graph to \n{}'.format(graph_path))
    except:
        print('\nUnable to save summary multi-graph to \n{}'.format(graph_path))
        traceback.print_exc()
    return
| [
"graynet.utils.warn_nan",
"networkx.MultiGraph",
"graynet.utils.check_weights",
"os.path.isfile",
"networkx.info",
"graynet.utils.import_features",
"graynet.utils.make_output_path_graph",
"graynet.utils.save_graph",
"traceback.print_exc",
"graynet.utils.stamp_expt_multiedge",
"graynet.utils.chec... | [((7632, 7748), 'graynet.utils.check_params_multiedge', 'check_params_multiedge', (['base_feature_list', 'input_dir', 'atlas', 'smoothing_param', 'node_size', 'out_dir', 'return_results'], {}), '(base_feature_list, input_dir, atlas, smoothing_param,\n node_size, out_dir, return_results)\n', (7654, 7748), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((7796, 7814), 'graynet.utils.check_atlas', 'check_atlas', (['atlas'], {}), '(atlas)\n', (7807, 7814), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((7873, 7904), 'graynet.utils.check_subjects', 'check_subjects', (['subject_id_list'], {}), '(subject_id_list)\n', (7887, 7904), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((7921, 7945), 'graynet.utils.check_num_bins', 'check_num_bins', (['num_bins'], {}), '(num_bins)\n', (7935, 7945), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, 
save_per_subject_graph\n'), ((7968, 8025), 'graynet.utils.check_edge_range_dict', 'check_edge_range_dict', (['edge_range_dict', 'base_feature_list'], {}), '(edge_range_dict, base_feature_list)\n', (7989, 8025), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((8089, 8122), 'graynet.utils.check_weights', 'check_weights', (['weight_method_list'], {}), '(weight_method_list)\n', (8102, 8122), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((8233, 8266), 'graynet.utils.check_stat_methods', 'check_stat_methods', (['summary_stats'], {}), '(summary_stats)\n', (8251, 8266), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((8284, 8310), 'graynet.utils.check_num_procs', 'check_num_procs', (['num_procs'], {}), '(num_procs)\n', (8299, 8310), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((8568, 8606), 'graynet.parcellate.roi_labels_centroids', 
'parcellate.roi_labels_centroids', (['atlas'], {}), '(atlas)\n', (8599, 8606), False, 'from graynet import parcellate\n'), ((9187, 9478), 'functools.partial', 'partial', (['per_subject_multi_edge', 'input_dir', 'base_feature_list', 'roi_labels', 'centroids', 'weight_method_list', 'summary_stats', 'summary_stat_names', 'atlas', 'atlas_name', 'smoothing_param', 'node_size', 'num_bins', 'edge_range_dict', 'out_dir', 'return_results', 'overwrite_results', 'pretty_print_options'], {}), '(per_subject_multi_edge, input_dir, base_feature_list, roi_labels,\n centroids, weight_method_list, summary_stats, summary_stat_names, atlas,\n atlas_name, smoothing_param, node_size, num_bins, edge_range_dict,\n out_dir, return_results, overwrite_results, pretty_print_options)\n', (9194, 9478), False, 'from functools import partial\n'), ((17864, 17874), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (17872, 17874), True, 'import networkx as nx\n'), ((11374, 11472), 'graynet.utils.stamp_expt_multiedge', 'stamp_expt_multiedge', (['base_feature_list', 'atlas_name', 'smoothing_param', 'node_size', 'weight_method'], {}), '(base_feature_list, atlas_name, smoothing_param,\n node_size, weight_method)\n', (11394, 11472), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((11589, 11660), 'graynet.utils.make_output_path_graph', 'make_output_path_graph', (['out_dir', 'subject', "[expt_id_multi, 'multigraph']"], {}), "(out_dir, subject, [expt_id_multi, 'multigraph'])\n", (11611, 11660), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, 
mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((9689, 9722), 'numpy.ceil', 'np.ceil', (['(num_subjects / num_procs)'], {}), '(num_subjects / num_procs)\n', (9696, 9722), True, 'import numpy as np\n'), ((9737, 9746), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (9744, 9746), False, 'from multiprocessing import Manager, Pool\n'), ((11813, 11840), 'os.path.isfile', 'isfile', (['out_path_multigraph'], {}), '(out_path_multigraph)\n', (11819, 11840), False, 'from os.path import getsize, isfile\n'), ((12121, 12136), 'networkx.MultiGraph', 'nx.MultiGraph', ([], {}), '()\n', (12134, 12136), True, 'import networkx as nx\n'), ((16457, 16514), 'graynet.utils.save_graph', 'save_graph', (['multigraph', 'out_path_multigraph', '"""multi-edge"""'], {}), "(multigraph, out_path_multigraph, 'multi-edge')\n", (16467, 16514), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((16700, 16786), 'graynet.utils.make_output_path_graph', 'make_output_path_graph', (['out_dir', 'subject', "[expt_id_multi, stat_name, 'multigraph']"], {}), "(out_dir, subject, [expt_id_multi, stat_name,\n 'multigraph'])\n", (16722, 16786), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((19523, 19537), 'networkx.info', 'nx.info', (['graph'], {}), '(graph)\n', (19530, 19537), True, 'import networkx as nx\n'), ((19550, 19609), 'networkx.write_graphml', 'nx.write_graphml', (['graph', 'out_weights_path'], 
{'encoding': '"""utf-8"""'}), "(graph, out_weights_path, encoding='utf-8')\n", (19566, 19609), True, 'import networkx as nx\n'), ((9765, 9790), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'num_procs'}), '(processes=num_procs)\n', (9769, 9790), False, 'from multiprocessing import Manager, Pool\n'), ((11845, 11873), 'os.path.getsize', 'getsize', (['out_path_multigraph'], {}), '(out_path_multigraph)\n', (11852, 11873), False, 'from os.path import getsize, isfile\n'), ((13422, 13491), 'graynet.utils.mask_background_roi', 'mask_background_roi', (['features[subject]', 'roi_labels', 'cfg.null_roi_name'], {}), '(features[subject], roi_labels, cfg.null_roi_name)\n', (13441, 13491), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((13648, 13738), 'graynet.utils.stamp_expt_weight', 'stamp_expt_weight', (['base_feature', 'atlas_name', 'smoothing_param', 'node_size', 'weight_method'], {}), '(base_feature, atlas_name, smoothing_param, node_size,\n weight_method)\n', (13665, 13738), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((15898, 15964), 'graynet.utils.save_per_subject_graph', 'save_per_subject_graph', (['unigraph', 'out_dir', 'subject', 'expt_id_single'], {}), '(unigraph, out_dir, subject, expt_id_single)\n', (15920, 15964), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, 
check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((16878, 16902), 'os.path.isfile', 'isfile', (['out_path_summary'], {}), '(out_path_summary)\n', (16884, 16902), False, 'from os.path import getsize, isfile\n'), ((19815, 19836), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (19834, 19836), False, 'import traceback\n'), ((12821, 12911), 'graynet.utils.import_features', 'import_features', (['input_dir', '[subject]', 'base_feature'], {'fwhm': 'smoothing_param', 'atlas': 'atlas'}), '(input_dir, [subject], base_feature, fwhm=smoothing_param,\n atlas=atlas)\n', (12836, 12911), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((14363, 14512), 'hiwenet.extract', 'hiwenet.extract', (['data', 'rois'], {'weight_method': 'weight_method', 'num_bins': 'num_bins', 'edge_range': 'edge_range_dict[base_feature]', 'return_networkx_graph': '(True)'}), '(data, rois, weight_method=weight_method, num_bins=num_bins,\n edge_range=edge_range_dict[base_feature], return_networkx_graph=True)\n', (14378, 14512), False, 'import hiwenet\n'), ((14912, 14932), 'graynet.utils.warn_nan', 'warn_nan', (['weight_vec'], {}), '(weight_vec)\n', (14920, 14932), False, 'from graynet.utils import stamp_expt_multiedge, check_params_multiedge, make_output_path_graph, save_graph, check_subjects, check_stat_methods, check_num_bins, check_weights, check_num_procs, check_atlas, check_edge_range_dict, mask_background_roi, warn_nan, stamp_expt_weight, import_features, save_per_subject_graph\n'), ((16907, 16932), 'os.path.getsize', 'getsize', (['out_path_summary'], {}), '(out_path_summary)\n', (16914, 16932), False, 'from 
os.path import getsize, isfile\n'), ((17212, 17248), 'networkx.read_graphml', 'nx.read_graphml', (['out_path_multigraph'], {}), '(out_path_multigraph)\n', (17227, 17248), True, 'import networkx as nx\n'), ((13143, 13164), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (13162, 13164), False, 'import traceback\n'), ((15356, 15367), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15364, 15367), False, 'import sys\n'), ((15685, 15706), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (15704, 15706), False, 'import traceback\n'), ((17664, 17685), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (17683, 17685), False, 'import traceback\n'), ((14838, 14880), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['unigraph', '"""weight"""'], {}), "(unigraph, 'weight')\n", (14860, 14880), True, 'import networkx as nx\n')] |
#!/usr/bin/python3
# coding: utf-8
import numpy as np
import plotly.graph_objects as go
from scipy import fftpack
def fft_denoise(x, y, showFigure=True, freq_int=0.15, freq_th=0.18, freq_min_A=0.03):
n = len(x)
if showFigure:
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=y))
fig.show()
y_hat = fftpack.fft(y) / (n/2)
if showFigure:
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=y_hat.real))
fig.show()
freq = fftpack.fftfreq(n, freq_int)
if showFigure and False:
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=freq))
fig.show()
y_hat[freq < 0] = 0
y_hat[freq > freq_th] = 0
y_hat[np.abs(y_hat) < freq_min_A] = 0
if showFigure:
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=y_hat.real))
fig.show()
y2 = np.real(fftpack.ifft(y_hat) * (n))
if showFigure:
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=y, mode='lines',
line=dict(width=.5, color='red')))
fig.add_trace(go.Scatter(x=x, y=y2, mode='lines+markers',
marker=dict(size=1, color='blue')))
fig.show()
return y2
| [
"plotly.graph_objects.Scatter",
"numpy.abs",
"plotly.graph_objects.Figure",
"scipy.fftpack.fft",
"scipy.fftpack.ifft",
"scipy.fftpack.fftfreq"
] | [((492, 520), 'scipy.fftpack.fftfreq', 'fftpack.fftfreq', (['n', 'freq_int'], {}), '(n, freq_int)\n', (507, 520), False, 'from scipy import fftpack\n'), ((251, 262), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (260, 262), True, 'import plotly.graph_objects as go\n'), ((339, 353), 'scipy.fftpack.fft', 'fftpack.fft', (['y'], {}), '(y)\n', (350, 353), False, 'from scipy import fftpack\n'), ((396, 407), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (405, 407), True, 'import plotly.graph_objects as go\n'), ((565, 576), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (574, 576), True, 'import plotly.graph_objects as go\n'), ((774, 785), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (783, 785), True, 'import plotly.graph_objects as go\n'), ((937, 948), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (946, 948), True, 'import plotly.graph_objects as go\n'), ((285, 305), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'y'}), '(x=x, y=y)\n', (295, 305), True, 'import plotly.graph_objects as go\n'), ((430, 459), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'y_hat.real'}), '(x=x, y=y_hat.real)\n', (440, 459), True, 'import plotly.graph_objects as go\n'), ((599, 622), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'freq'}), '(x=x, y=freq)\n', (609, 622), True, 'import plotly.graph_objects as go\n'), ((708, 721), 'numpy.abs', 'np.abs', (['y_hat'], {}), '(y_hat)\n', (714, 721), True, 'import numpy as np\n'), ((808, 837), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': 'y_hat.real'}), '(x=x, y=y_hat.real)\n', (818, 837), True, 'import plotly.graph_objects as go\n'), ((876, 895), 'scipy.fftpack.ifft', 'fftpack.ifft', (['y_hat'], {}), '(y_hat)\n', (888, 895), False, 'from scipy import fftpack\n')] |
import plotly.graph_objects as go
import numpy as np
from styles.graphs import (
GRAPH_BACKGROUND_COLOR,
summary_bar_graph_colors,
LINE_GRAPH_GRID_COLOR,
)
from faker import Faker
fake = Faker()
def simple_bar_chart_logic():
"""Generates simple bar chart figure
Returns:
obj: Plotly figure
"""
x_values = fake.words(nb=10, unique=True)
y_values = sorted(np.random.randint(100000, size=10), reverse=True)
y2_values = sorted(np.random.randint(80000, size=10), reverse=True)
fig = go.Figure(
[
go.Bar(
x=x_values,
y=y_values,
text=y_values,
name=fake.word(),
cliponaxis=False,
marker_color=summary_bar_graph_colors[0],
),
go.Bar(
x=x_values,
y=y2_values,
text=y2_values,
name=fake.word(),
cliponaxis=False,
marker_color=summary_bar_graph_colors[1],
),
],
layout=go.Layout(
paper_bgcolor=GRAPH_BACKGROUND_COLOR,
plot_bgcolor=GRAPH_BACKGROUND_COLOR,
height=350,
margin=dict(t=20, b=20, l=20, r=20),
yaxis={"visible": False, "zeroline": False},
barmode="group",
legend=dict(
x=0.80,
y=1.0,
bgcolor="rgba(255, 255, 255, 0)",
bordercolor="rgba(255, 255, 255, 0)",
),
),
)
fig.update_traces(texttemplate="%{text:.2s}", textposition="outside")
return fig
def horizontal_bar_chart_logic():
"""Generates horizontal bar chart figure
Returns:
obj: Plotly figure
"""
y_values = sorted([np.random.uniform(90, 95), *np.random.uniform(1, 50, [7])])
x_values = fake.words(nb=8, unique=True)
fig = go.Figure(
[
go.Bar(
x=y_values,
y=x_values,
marker=dict(
color=y_values,
colorscale="Blugrn",
line=dict(color="rgba(50, 171, 96, 1.0)", width=1),
),
orientation="h",
),
],
layout=go.Layout(
paper_bgcolor=GRAPH_BACKGROUND_COLOR,
plot_bgcolor=GRAPH_BACKGROUND_COLOR,
margin=dict(t=20, b=20, l=20, r=20),
yaxis=dict(
showgrid=False,
showline=False,
showticklabels=True,
ticks="outside",
ticklen=10,
),
xaxis=dict(
zeroline=False,
showline=False,
showticklabels=True,
showgrid=True,
gridcolor=LINE_GRAPH_GRID_COLOR,
zerolinecolor=LINE_GRAPH_GRID_COLOR,
),
barmode="group",
),
)
y_s = np.round(y_values, decimals=2)
annotations = []
for y_d, x_d in zip(y_s, x_values):
annotations.append(
dict(
xref="x1",
yref="y1",
y=x_d,
x=y_d + 5,
text=str(y_d) + "%",
font=dict(family="Arial", size=12, color="rgb(50, 171, 96)"),
showarrow=False,
)
)
fig.update_layout(annotations=annotations)
return fig
def multi_group_bar_chart(num, options=None, values=None):
"""Generates multi group bar chart figure
Returns:
obj: Plotly figure
"""
x_values = fake.words(nb=10, unique=True)
text = fake.words(nb=num, unique=True)
if options and values:
text = [
x["label"] for x in options if x["value"] in values
] # no need to care about the order, dummy data
y_values = [
sorted(np.random.randint(100000, size=10), reverse=True) for _ in range(num)
]
fig = go.Figure(
[
go.Bar(
x=x_values,
y=yi,
text=yi,
name=name,
cliponaxis=False,
marker_color=color,
)
for yi, name, color in zip(y_values, text, summary_bar_graph_colors[:num])
],
layout=go.Layout(
paper_bgcolor=GRAPH_BACKGROUND_COLOR,
plot_bgcolor=GRAPH_BACKGROUND_COLOR,
margin=dict(t=20, b=20, l=20, r=20),
yaxis={"visible": False, "zeroline": False},
barmode="group",
legend=dict(
x=0.80,
y=1.0,
bgcolor="rgba(255, 255, 255, 0)",
bordercolor="rgba(255, 255, 255, 0)",
),
),
)
return fig
| [
"numpy.random.uniform",
"faker.Faker",
"plotly.graph_objects.Bar",
"numpy.random.randint",
"numpy.round"
] | [((200, 207), 'faker.Faker', 'Faker', ([], {}), '()\n', (205, 207), False, 'from faker import Faker\n'), ((2968, 2998), 'numpy.round', 'np.round', (['y_values'], {'decimals': '(2)'}), '(y_values, decimals=2)\n', (2976, 2998), True, 'import numpy as np\n'), ((399, 433), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {'size': '(10)'}), '(100000, size=10)\n', (416, 433), True, 'import numpy as np\n'), ((472, 505), 'numpy.random.randint', 'np.random.randint', (['(80000)'], {'size': '(10)'}), '(80000, size=10)\n', (489, 505), True, 'import numpy as np\n'), ((1796, 1821), 'numpy.random.uniform', 'np.random.uniform', (['(90)', '(95)'], {}), '(90, 95)\n', (1813, 1821), True, 'import numpy as np\n'), ((3887, 3921), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {'size': '(10)'}), '(100000, size=10)\n', (3904, 3921), True, 'import numpy as np\n'), ((4006, 4093), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'x_values', 'y': 'yi', 'text': 'yi', 'name': 'name', 'cliponaxis': '(False)', 'marker_color': 'color'}), '(x=x_values, y=yi, text=yi, name=name, cliponaxis=False, marker_color\n =color)\n', (4012, 4093), True, 'import plotly.graph_objects as go\n'), ((1824, 1853), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(50)', '[7]'], {}), '(1, 50, [7])\n', (1841, 1853), True, 'import numpy as np\n')] |
import gym
import numpy
import random
import os
from gym import error, spaces, utils
from gym.utils import seeding
from matplotlib import pyplot
from collections import OrderedDict
import gym_sted
from gym_sted import rewards, defaults
from gym_sted.utils import SynapseGenerator, MicroscopeGenerator, get_foreground, BleachSampler, Normalizer
from gym_sted.rewards import objectives
from gym_sted.prefnet import load_demonstrations
from gym_sted.defaults import obj_dict, bounds_dict, scales_dict
class STEDMultiObjectivesEnv(gym.Env):
"""
Creates a `STEDMultiObjectivesEnv`
Action space
The action space corresponds to the imaging parameters
Observation space
The observation space is a tuple, where
1. The current confocal image, the previous confocal and STED images
2. A vector of the selected actions and the obtained objectives in the episode
"""
metadata = {'render.modes': ['human']}
obj_names = ["Resolution", "Bleach", "SNR"]
def __init__(self, bleach_sampling="constant", actions=["p_sted"],
max_episode_steps=10, scale_nanodomain_reward=1.,
normalize_observations=False):
self.actions = actions
self.action_space = spaces.Box(
low=numpy.array([defaults.action_spaces[name]["low"] for name in self.actions]),
high=numpy.array([defaults.action_spaces[name]["high"] for name in self.actions]),
dtype=numpy.float32
)
self.observation_space = spaces.Tuple((
spaces.Box(0, 2**16, shape=(64, 64, 3), dtype=numpy.uint16),
spaces.Box(
0, 1, shape=(len(self.obj_names) * max_episode_steps + len(self.actions) * max_episode_steps,),
dtype=numpy.float32
) # Articulation, shape is given by objectives, actions at each steps
))
self.state = None
self.initial_count = None
self.synapse = None
self.current_step = 0
self.bleach_sampling = bleach_sampling
self.scale_nanodomain_reward = scale_nanodomain_reward
self.episode_memory = {
"actions" : [],
"mo_objs" : [],
"reward" : [],
}
self.datamap = None
self.viewer = None
# seed = self.seed(0)
molecules = 5
self.synapse_generator = SynapseGenerator(
mode="mushroom", seed=None, n_nanodomains=(3, 15), n_molecs_in_domain=(molecules * 20, molecules * 35)
)
self.microscope_generator = MicroscopeGenerator()
self.microscope = self.microscope_generator.generate_microscope()
self.bleach_sampler = BleachSampler(mode=self.bleach_sampling)
objs = OrderedDict({obj_name : obj_dict[obj_name] for obj_name in self.obj_names})
bounds = OrderedDict({obj_name : bounds_dict[obj_name] for obj_name in self.obj_names})
scales = OrderedDict({obj_name : scales_dict[obj_name] for obj_name in self.obj_names})
self.mo_reward_calculator = rewards.MORewardCalculator(objs, bounds=bounds, scales=scales)
self.nb_reward_calculator = rewards.NanodomainsRewardCalculator(
{"NbNanodomains" : obj_dict["NbNanodomains"]},
bounds={"NbNanodomains" : bounds_dict["NbNanodomains"]},
scales={"NbNanodomains" : scales_dict["NbNanodomains"]}
)
# Creates an action and objective normalizer
self.normalize_observations = normalize_observations
self.action_normalizer = Normalizer(self.actions, defaults.action_spaces)
self.obj_normalizer = Normalizer(self.obj_names, scales_dict)
def step(self, action):
"""
Method that should be implemented in the object that inherited
:param action: A `numpy.ndarray` of the action
"""
raise NotImplementedError
def reset(self):
"""
Resets the environment with a new datamap
:returns : A `numpy.ndarray` of the molecules
"""
# Updates the current bleach function
self.microscope = self.microscope_generator.generate_microscope(
phy_react=self.bleach_sampler.sample()
)
self.current_step = 0
self.episode_memory = {
"actions" : [],
"mo_objs" : [],
"reward" : [],
}
state = self._update_datamap()
self.state = numpy.stack((state, numpy.zeros_like(state), numpy.zeros_like(state)), axis=-1)
return (self.state, numpy.zeros((self.observation_space[1].shape[0],)))
def render(self, info, mode='human'):
"""
Renders the environment
:param info: A `dict` of data
:param mode: A `str` of the available mode
"""
fig, axes = pyplot.subplots(1, 3, figsize=(10,3), sharey=True, sharex=True)
axes[0].imshow(info["conf1"])
axes[0].set_title(f"Datamap roi")
axes[1].imshow(info["bleached"])
axes[1].set_title(f"Bleached datamap")
axes[2].imshow(info["sted_image"])
axes[2].set_title(f"Acquired signal (photons)")
pyplot.show(block=True)
def seed(self, seed=None):
"""
Seeds the environment
:param seed: An `int` of the random seed
"""
self.np_random, seed = seeding.np_random(seed)
self.bleach_sampler.seed(seed)
return [seed]
def update_(self, **kwargs):
"""
Utilitary method to update parameters in-place
"""
for key, value in kwargs.items():
setattr(self, key, value)
def _update_datamap(self):
"""
Generates a new `datamap` and acquires a confocal image. The new state of
the environment is returned
:returns : A `numpy.ndarray` of the state
"""
self.synapse = self.synapse_generator(rotate=True)
self.datamap = self.microscope_generator.generate_datamap(
datamap = {
"whole_datamap" : self.synapse.frame,
"datamap_pixelsize" : self.microscope_generator.pixelsize
}
)
# Acquire confocal image which sets the current state
conf_params = self.microscope_generator.generate_params()
state, _, _ = self.microscope.get_signal_and_bleach(
self.datamap, self.datamap.pixelsize, **conf_params, bleach=False
)
return state
def _acquire(self, action):
"""
Acquires from the `datamap` using the provided parameters. A confocal image
before and after the STED image are acquired to calculate the objectives
:param action: A `numpy.ndarray` of the imaging parameters
:returns : A `numpy.ndarray` of the acquired STED
A `numpy.ndarray` of the bleached `datamap`
A `numpy.ndarray` of the acquired conf1
A `numpy.ndarray` of the acquired conf2
A `numpy.ndarray` of the foreground in the STED image
A `numpy.ndarray` of the foreground in the conf1 image
"""
# Generates imaging parameters
sted_params = self.microscope_generator.generate_params(
imaging = {
name : action[self.actions.index(name)]
if name in self.actions else getattr(defaults, name.upper())
for name in ["pdt", "p_ex", "p_sted"]
}
)
conf_params = self.microscope_generator.generate_params()
# Acquire confocal image
conf1, bleached, _ = self.microscope.get_signal_and_bleach(
self.datamap, self.datamap.pixelsize, **conf_params, bleach=False
)
# Acquire STED image
sted_image, bleached, _ = self.microscope.get_signal_and_bleach(
self.datamap, self.datamap.pixelsize, **sted_params, bleach=True
)
# Acquire confocal image
conf2, bleached, _ = self.microscope.get_signal_and_bleach(
self.datamap, self.datamap.pixelsize, **conf_params, bleach=False
)
# foreground on confocal image
fg_c = get_foreground(conf1)
# foreground on sted image
if numpy.any(sted_image):
fg_s = get_foreground(sted_image)
else:
fg_s = numpy.ones_like(fg_c)
# remove STED foreground points not in confocal foreground, if any
fg_s *= fg_c
return sted_image, bleached["base"][self.datamap.roi], conf1, conf2, fg_s, fg_c
def close(self):
return None
class ContextualSTEDMultiObjectivesEnv(STEDMultiObjectivesEnv):
"""
Creates a `ContextualSTEDMultiObjectivesEnv`
Action space
The action space corresponds to the imaging parameters
Observation space
The observation space is a tuple, where
1. The current confocal image, the previous confocal and STED images
2. A vector of the selected actions and the obtained objectives in the episode
"""
metadata = {'render.modes': ['human']}
obj_names = ["Resolution", "Bleach", "SNR"]
def __init__(self, bleach_sampling="constant", actions=["p_sted"],
max_episode_steps=10, scale_nanodomain_reward=1.,
normalize_observations=False):
super(ContextualSTEDMultiObjectivesEnv, self).__init__(
bleach_sampling = bleach_sampling,
actions = actions,
max_episode_steps = max_episode_steps,
scale_nanodomain_reward = scale_nanodomain_reward,
normalize_observations=normalize_observations
)
def step(self, action):
# We manually clip the actions which are out of action space
action = numpy.clip(action, self.action_space.low, self.action_space.high)
# Acquire an image with the given parameters
sted_image, bleached, conf1, conf2, fg_s, fg_c = self._acquire(action)
mo_objs = self.mo_reward_calculator.evaluate(sted_image, conf1, conf2, fg_s, fg_c)
f1_score = self.nb_reward_calculator.evaluate(sted_image, conf1, conf2, fg_s, fg_c, synapse=self.synapse)
reward = f1_score * self.scale_nanodomain_reward
# Updates memory
done = self.current_step >= self.spec.max_episode_steps - 1
self.current_step += 1
self.episode_memory["mo_objs"].append(mo_objs)
self.episode_memory["actions"].append(action)
self.episode_memory["reward"].append(reward)
state = self._update_datamap()
self.state = numpy.stack((state, conf1, sted_image), axis=-1)
info = {
"action" : action,
"bleached" : bleached,
"sted_image" : sted_image,
"conf1" : conf1,
"conf2" : conf2,
"fg_c" : fg_c,
"fg_s" : fg_s,
"mo_objs" : mo_objs,
"reward" : reward,
"f1-score" : f1_score,
"nanodomains-coords" : self.synapse.nanodomains_coords
}
# Build the observation space
obs = []
for a, mo in zip(self.episode_memory["actions"], self.episode_memory["mo_objs"]):
obs.extend(self.action_normalizer(a) if self.normalize_observations else a)
obs.extend(self.obj_normalizer(mo) if self.normalize_observations else mo)
obs = numpy.pad(numpy.array(obs), (0, self.observation_space[1].shape[0] - len(obs)))
return (self.state, obs), reward, done, info
class ExpertDemonstrationF1ScoreSTEDMultiObjectivesEnv(STEDMultiObjectivesEnv):
"""
Creates a `ExpertDemonstrationSTEDMultiObjectivesEnv`
Action space
The action space corresponds to the imaging parameters
Observation space
The observation space is a tuple, where
1. The current confocal image, the previous confocal and STED images
2. A vector of the selected actions and the obtained objectives in the episode
"""
metadata = {'render.modes': ['human']}
obj_names = ["Resolution", "Bleach", "SNR"]
def __init__(self, bleach_sampling="constant", actions=["p_sted"],
max_episode_steps=10, scale_nanodomain_reward=1.,
normalize_observations=False):
super(ExpertDemonstrationF1ScoreSTEDMultiObjectivesEnv, self).__init__(
bleach_sampling = bleach_sampling,
actions = actions,
max_episode_steps = max_episode_steps,
scale_nanodomain_reward = scale_nanodomain_reward,
normalize_observations=normalize_observations
)
# Load expert demonstration
self.demonstrations = load_demonstrations()
def step(self, action):
"""
Implements a single step in the environment
:param action: A `numpy.ndarray` of the imaging parameters
:returns : A `tuple` of the observation
A `float` of the received reward
A `bool` whether the episode is finished
A `dict` of information about the episode
"""
# We manually clip the actions which are out of action space
action = numpy.clip(action, self.action_space.low, self.action_space.high)
# Acquire an image with the given parameters
sted_image, bleached, conf1, conf2, fg_s, fg_c = self._acquire(action)
mo_objs = self.mo_reward_calculator.evaluate(sted_image, conf1, conf2, fg_s, fg_c)
f1_score = self.nb_reward_calculator.evaluate(sted_image, conf1, conf2, fg_s, fg_c, synapse=self.synapse)
# Reward is proportionnal to the ranked position of the last image
sorted_indices = numpy.argsort(self.demonstrations + [f1_score])
index = numpy.argmax(sorted_indices).item()
# Reward is given by the position in the sorting
reward = (index + 1) / len(sorted_indices)
# Updates memory
done = self.current_step >= self.spec.max_episode_steps - 1
self.current_step += 1
self.episode_memory["mo_objs"].append(mo_objs)
self.episode_memory["actions"].append(action)
self.episode_memory["reward"].append(reward)
state = self._update_datamap()
self.state = numpy.stack((state, conf1, sted_image), axis=-1)
info = {
"action" : action,
"bleached" : bleached,
"sted_image" : sted_image,
"conf1" : conf1,
"conf2" : conf2,
"fg_c" : fg_c,
"fg_s" : fg_s,
"mo_objs" : mo_objs,
"reward" : reward,
"f1-score" : f1_score,
"nanodomains-coords" : self.synapse.nanodomains_coords
}
# Build the observation space
obs = []
for a, mo in zip(self.episode_memory["actions"], self.episode_memory["mo_objs"]):
obs.extend(self.action_normalizer(a) if self.normalize_observations else a)
obs.extend(self.obj_normalizer(mo) if self.normalize_observations else mo)
obs = numpy.pad(numpy.array(obs), (0, self.observation_space[1].shape[0] - len(obs)))
return (self.state, obs), reward, done, info
class HumanSTEDMultiObjectivesEnv(STEDMultiObjectivesEnv):
"""
Creates a `HumanSTEDMultiObjectivesEnv`
Action space
The action space corresponds to the imaging parameters
Observation space
The observation space is a tuple, where
1. The current confocal image, the previous confocal and STED images
2. A vector of the selected actions and the obtained objectives in the episode
"""
metadata = {'render.modes': ['human']}
obj_names = ["Resolution", "Bleach", "SNR"]
def __init__(self, bleach_sampling="constant", actions=["p_sted"],
max_episode_steps=10, scale_nanodomain_reward=1.,
normalize_observations=False):
super(HumanSTEDMultiObjectivesEnv, self).__init__(
bleach_sampling = bleach_sampling,
actions = actions,
max_episode_steps = max_episode_steps,
scale_nanodomain_reward = scale_nanodomain_reward,
normalize_observations = normalize_observations
)
def step(self, action):
"""
Implements a single step in the environment
:param action: A `numpy.ndarray` of the imaging parameters
:returns : A `tuple` of the observation
A `float` of the received reward
A `bool` whether the episode is finished
A `dict` of information about the episode
"""
# We manually clip the actions which are out of action space
action = numpy.clip(action, self.action_space.low, self.action_space.high)
# On the last step of the environment we enforce the final decision
final_action = self.current_step >= self.spec.max_episode_steps - 1
# Acquire an image with the given parameters
sted_image, bleached, conf1, conf2, fg_s, fg_c = self._acquire(action)
mo_objs = self.mo_reward_calculator.evaluate(sted_image, conf1, conf2, fg_s, fg_c)
f1_score = self.nb_reward_calculator.evaluate(sted_image, conf1, conf2, fg_s, fg_c, synapse=self.synapse)
reward = f1_score
done = False
if final_action:
reward += f1_score * self.scale_nanodomain_reward
done = True
# Updates memory
self.current_step += 1
self.episode_memory["mo_objs"].append(mo_objs)
self.episode_memory["actions"].append(action)
self.episode_memory["reward"].append(reward)
state = self._update_datamap()
self.state = numpy.stack((state, conf1, sted_image), axis=-1)
info = {
"action" : action,
"bleached" : bleached,
"sted_image" : sted_image,
"conf1" : conf1,
"conf2" : conf2,
"fg_c" : fg_c,
"fg_s" : fg_s,
"mo_objs" : mo_objs,
"reward" : reward,
"f1-score" : f1_score,
"nanodomains-coords" : self.synapse.nanodomains_coords
}
# Build the observation space
obs = []
for a, mo in zip(self.episode_memory["actions"], self.episode_memory["mo_objs"]):
obs.extend(self.action_normalizer(numpy.array(a)) if self.normalize_observations else a)
obs.extend(self.obj_normalizer(numpy.array(mo)) if self.normalize_observations else mo)
obs = numpy.pad(numpy.array(obs), (0, self.observation_space[1].shape[0] - len(obs)))
return (self.state, obs), reward, done, info
| [
"gym_sted.rewards.MORewardCalculator",
"numpy.argmax",
"numpy.clip",
"numpy.argsort",
"gym.utils.seeding.np_random",
"numpy.zeros_like",
"gym_sted.utils.BleachSampler",
"gym_sted.utils.SynapseGenerator",
"gym_sted.prefnet.load_demonstrations",
"matplotlib.pyplot.subplots",
"numpy.stack",
"matp... | [((2383, 2507), 'gym_sted.utils.SynapseGenerator', 'SynapseGenerator', ([], {'mode': '"""mushroom"""', 'seed': 'None', 'n_nanodomains': '(3, 15)', 'n_molecs_in_domain': '(molecules * 20, molecules * 35)'}), "(mode='mushroom', seed=None, n_nanodomains=(3, 15),\n n_molecs_in_domain=(molecules * 20, molecules * 35))\n", (2399, 2507), False, 'from gym_sted.utils import SynapseGenerator, MicroscopeGenerator, get_foreground, BleachSampler, Normalizer\n'), ((2562, 2583), 'gym_sted.utils.MicroscopeGenerator', 'MicroscopeGenerator', ([], {}), '()\n', (2581, 2583), False, 'from gym_sted.utils import SynapseGenerator, MicroscopeGenerator, get_foreground, BleachSampler, Normalizer\n'), ((2688, 2728), 'gym_sted.utils.BleachSampler', 'BleachSampler', ([], {'mode': 'self.bleach_sampling'}), '(mode=self.bleach_sampling)\n', (2701, 2728), False, 'from gym_sted.utils import SynapseGenerator, MicroscopeGenerator, get_foreground, BleachSampler, Normalizer\n'), ((2745, 2819), 'collections.OrderedDict', 'OrderedDict', (['{obj_name: obj_dict[obj_name] for obj_name in self.obj_names}'], {}), '({obj_name: obj_dict[obj_name] for obj_name in self.obj_names})\n', (2756, 2819), False, 'from collections import OrderedDict\n'), ((2838, 2915), 'collections.OrderedDict', 'OrderedDict', (['{obj_name: bounds_dict[obj_name] for obj_name in self.obj_names}'], {}), '({obj_name: bounds_dict[obj_name] for obj_name in self.obj_names})\n', (2849, 2915), False, 'from collections import OrderedDict\n'), ((2934, 3011), 'collections.OrderedDict', 'OrderedDict', (['{obj_name: scales_dict[obj_name] for obj_name in self.obj_names}'], {}), '({obj_name: scales_dict[obj_name] for obj_name in self.obj_names})\n', (2945, 3011), False, 'from collections import OrderedDict\n'), ((3049, 3111), 'gym_sted.rewards.MORewardCalculator', 'rewards.MORewardCalculator', (['objs'], {'bounds': 'bounds', 'scales': 'scales'}), '(objs, bounds=bounds, scales=scales)\n', (3075, 3111), False, 'from gym_sted import rewards, 
defaults\n'), ((3148, 3351), 'gym_sted.rewards.NanodomainsRewardCalculator', 'rewards.NanodomainsRewardCalculator', (["{'NbNanodomains': obj_dict['NbNanodomains']}"], {'bounds': "{'NbNanodomains': bounds_dict['NbNanodomains']}", 'scales': "{'NbNanodomains': scales_dict['NbNanodomains']}"}), "({'NbNanodomains': obj_dict[\n 'NbNanodomains']}, bounds={'NbNanodomains': bounds_dict['NbNanodomains'\n ]}, scales={'NbNanodomains': scales_dict['NbNanodomains']})\n", (3183, 3351), False, 'from gym_sted import rewards, defaults\n'), ((3539, 3587), 'gym_sted.utils.Normalizer', 'Normalizer', (['self.actions', 'defaults.action_spaces'], {}), '(self.actions, defaults.action_spaces)\n', (3549, 3587), False, 'from gym_sted.utils import SynapseGenerator, MicroscopeGenerator, get_foreground, BleachSampler, Normalizer\n'), ((3618, 3657), 'gym_sted.utils.Normalizer', 'Normalizer', (['self.obj_names', 'scales_dict'], {}), '(self.obj_names, scales_dict)\n', (3628, 3657), False, 'from gym_sted.utils import SynapseGenerator, MicroscopeGenerator, get_foreground, BleachSampler, Normalizer\n'), ((4790, 4854), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(3)'], {'figsize': '(10, 3)', 'sharey': '(True)', 'sharex': '(True)'}), '(1, 3, figsize=(10, 3), sharey=True, sharex=True)\n', (4805, 4854), False, 'from matplotlib import pyplot\n'), ((5133, 5156), 'matplotlib.pyplot.show', 'pyplot.show', ([], {'block': '(True)'}), '(block=True)\n', (5144, 5156), False, 'from matplotlib import pyplot\n'), ((5324, 5347), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (5341, 5347), False, 'from gym.utils import seeding\n'), ((8143, 8164), 'gym_sted.utils.get_foreground', 'get_foreground', (['conf1'], {}), '(conf1)\n', (8157, 8164), False, 'from gym_sted.utils import SynapseGenerator, MicroscopeGenerator, get_foreground, BleachSampler, Normalizer\n'), ((8211, 8232), 'numpy.any', 'numpy.any', (['sted_image'], {}), '(sted_image)\n', (8220, 8232), False, 'import 
numpy\n'), ((9732, 9797), 'numpy.clip', 'numpy.clip', (['action', 'self.action_space.low', 'self.action_space.high'], {}), '(action, self.action_space.low, self.action_space.high)\n', (9742, 9797), False, 'import numpy\n'), ((10541, 10589), 'numpy.stack', 'numpy.stack', (['(state, conf1, sted_image)'], {'axis': '(-1)'}), '((state, conf1, sted_image), axis=-1)\n', (10552, 10589), False, 'import numpy\n'), ((12633, 12654), 'gym_sted.prefnet.load_demonstrations', 'load_demonstrations', ([], {}), '()\n', (12652, 12654), False, 'from gym_sted.prefnet import load_demonstrations\n'), ((13136, 13201), 'numpy.clip', 'numpy.clip', (['action', 'self.action_space.low', 'self.action_space.high'], {}), '(action, self.action_space.low, self.action_space.high)\n', (13146, 13201), False, 'import numpy\n'), ((13641, 13688), 'numpy.argsort', 'numpy.argsort', (['(self.demonstrations + [f1_score])'], {}), '(self.demonstrations + [f1_score])\n', (13654, 13688), False, 'import numpy\n'), ((14197, 14245), 'numpy.stack', 'numpy.stack', (['(state, conf1, sted_image)'], {'axis': '(-1)'}), '((state, conf1, sted_image), axis=-1)\n', (14208, 14245), False, 'import numpy\n'), ((16649, 16714), 'numpy.clip', 'numpy.clip', (['action', 'self.action_space.low', 'self.action_space.high'], {}), '(action, self.action_space.low, self.action_space.high)\n', (16659, 16714), False, 'import numpy\n'), ((17646, 17694), 'numpy.stack', 'numpy.stack', (['(state, conf1, sted_image)'], {'axis': '(-1)'}), '((state, conf1, sted_image), axis=-1)\n', (17657, 17694), False, 'import numpy\n'), ((4529, 4579), 'numpy.zeros', 'numpy.zeros', (['(self.observation_space[1].shape[0],)'], {}), '((self.observation_space[1].shape[0],))\n', (4540, 4579), False, 'import numpy\n'), ((8253, 8279), 'gym_sted.utils.get_foreground', 'get_foreground', (['sted_image'], {}), '(sted_image)\n', (8267, 8279), False, 'from gym_sted.utils import SynapseGenerator, MicroscopeGenerator, get_foreground, BleachSampler, Normalizer\n'), ((8313, 8334), 
'numpy.ones_like', 'numpy.ones_like', (['fg_c'], {}), '(fg_c)\n', (8328, 8334), False, 'import numpy\n'), ((11346, 11362), 'numpy.array', 'numpy.array', (['obs'], {}), '(obs)\n', (11357, 11362), False, 'import numpy\n'), ((15002, 15018), 'numpy.array', 'numpy.array', (['obs'], {}), '(obs)\n', (15013, 15018), False, 'import numpy\n'), ((18477, 18493), 'numpy.array', 'numpy.array', (['obs'], {}), '(obs)\n', (18488, 18493), False, 'import numpy\n'), ((1286, 1361), 'numpy.array', 'numpy.array', (["[defaults.action_spaces[name]['low'] for name in self.actions]"], {}), "([defaults.action_spaces[name]['low'] for name in self.actions])\n", (1297, 1361), False, 'import numpy\n'), ((1380, 1456), 'numpy.array', 'numpy.array', (["[defaults.action_spaces[name]['high'] for name in self.actions]"], {}), "([defaults.action_spaces[name]['high'] for name in self.actions])\n", (1391, 1456), False, 'import numpy\n'), ((1561, 1622), 'gym.spaces.Box', 'spaces.Box', (['(0)', '(2 ** 16)'], {'shape': '(64, 64, 3)', 'dtype': 'numpy.uint16'}), '(0, 2 ** 16, shape=(64, 64, 3), dtype=numpy.uint16)\n', (1571, 1622), False, 'from gym import error, spaces, utils\n'), ((4441, 4464), 'numpy.zeros_like', 'numpy.zeros_like', (['state'], {}), '(state)\n', (4457, 4464), False, 'import numpy\n'), ((4466, 4489), 'numpy.zeros_like', 'numpy.zeros_like', (['state'], {}), '(state)\n', (4482, 4489), False, 'import numpy\n'), ((13705, 13733), 'numpy.argmax', 'numpy.argmax', (['sorted_indices'], {}), '(sorted_indices)\n', (13717, 13733), False, 'import numpy\n'), ((18298, 18312), 'numpy.array', 'numpy.array', (['a'], {}), '(a)\n', (18309, 18312), False, 'import numpy\n'), ((18396, 18411), 'numpy.array', 'numpy.array', (['mo'], {}), '(mo)\n', (18407, 18411), False, 'import numpy\n')] |
import numpy as np
def getRating(matchData, plyrPubKey):
# the matchData is formatted as {pubKey: {rating: int,...},...}
"""
use highest parameter based total parameter values of all players
:param matchData: any data type to process by your function
:param plyrPubKey: to get the rating of this match, str
:return: the rating of the player, float
"""
enum = ["gold_per_min", "xp_per_min", "kills_per_min", "last_hits_per_min", "hero_damage_per_min",
"hero_healing_per_min", "tower_damage", "stuns_per_min"]
ratingBase = []
matchData = matchData[0]
for key, item in matchData.items():
ratingBase += [[matchData[key][j] for j in enum]]
ratingBase = list(np.asarray(ratingBase).sum(axis=0))
plyrallParam = [(matchData[plyrPubKey][enum[j]] / ratingBase[j]) if ratingBase[j] != 0 else 0 for j in
range(0, len(enum))]
plyrRating = max(plyrallParam)
return float(plyrRating) | [
"numpy.asarray"
] | [((744, 766), 'numpy.asarray', 'np.asarray', (['ratingBase'], {}), '(ratingBase)\n', (754, 766), True, 'import numpy as np\n')] |
'''
Based on
https://www.tensorflow.org/tutorials/images/cnn
'''
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
from collections.abc import Iterable
import numpy as np
'''
Define Swish Function
'''
from tensorflow.keras.layers import Layer
import tensorflow.keras.backend as K
def swish(x, beta=1.0):
    """Swish activation: the input gated by sigmoid(beta * x)."""
    gate = K.sigmoid(beta * x)
    return gate * x
class Swish(Layer):
    """Keras layer applying the Swish activation x * sigmoid(beta * x).

    When ``trainable=True`` the beta factor is registered as a trainable
    weight and learned by the optimizer; otherwise it stays fixed at the
    constructor value.
    """
    def __init__(self, beta=1.0, trainable=False, **kwargs):
        # beta: initial slope of the sigmoid gate; trainable: learn beta or not.
        super(Swish, self).__init__(**kwargs)
        self.supports_masking = True
        self.beta = beta
        self.trainable = trainable
    def build(self, input_shape):
        # Materialize beta as a backend variable so it participates in the
        # compute graph (and in backprop when trainable).
        self.beta_factor = K.variable(self.beta,
                                      dtype=K.floatx(),
                                      name='beta_factor')
        if self.trainable:
            # Registering in _trainable_weights lets the optimizer update beta.
            self._trainable_weights.append(self.beta_factor)
        super(Swish, self).build(input_shape)
    def call(self, inputs, mask=None):
        # Delegate to the module-level swish() with the (possibly learned) beta.
        return swish(inputs, self.beta_factor)
    def get_config(self):
        # Serialize the current beta: read the learned value when trainable,
        # otherwise the constructor argument.
        config = {'beta': self.get_weights()[0] if self.trainable else self.beta,
                  'trainable': self.trainable}
        base_config = super(Swish, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def compute_output_shape(self, input_shape):
        # Element-wise activation: output shape equals input shape.
        return input_shape
'''
Get data
'''
# Download CIFAR-10 (50k train / 10k test 32x32 RGB images, integer labels 0-9).
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
# Human-readable names indexed by the integer class label.
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# Preview the first 25 training images in a 5x5 grid.
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    # The CIFAR labels happen to be arrays,
    # which is why you need the extra index
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()
'''
ReLU model
'''
# Baseline CNN: three conv stages with ReLU activations followed by a small
# dense head emitting 10 class logits (no softmax; loss uses from_logits).
r_model = models.Sequential([
    layers.Conv2D(32, (3, 3), input_shape=(32, 32, 3)),
    layers.Activation('relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3)),
    layers.Activation('relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3)),
    layers.Activation('relu'),
    layers.Flatten(),
    layers.Dense(64),
    layers.Activation('relu'),
    layers.Dense(10),
])
r_model.summary()
r_model.compile(optimizer='adam',
                loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
r_history = r_model.fit(train_images, train_labels, epochs=5,
                        validation_data=(test_images, test_labels))
'''
Swish model
'''
# Swish CNN: identical topology to the ReLU baseline, but every activation
# is a trainable-beta Swish layer.
s_model = models.Sequential([
    layers.Conv2D(32, (3, 3), input_shape=(32, 32, 3)),
    Swish(beta=1.0, trainable=True, name='swish1'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3)),
    Swish(beta=1.0, trainable=True, name='swish2'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3)),
    Swish(beta=1.0, trainable=True, name='swish3'),
    layers.Flatten(),
    layers.Dense(64),
    Swish(beta=1.0, trainable=True, name='swish4'),
    layers.Dense(10),
])
s_model.summary()
s_model.compile(optimizer='adam',
                loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
s_history = s_model.fit(train_images, train_labels, epochs=5,
                        validation_data=(test_images, test_labels))
'''
Results
'''
plt.plot(r_history.history['accuracy'], label='relu accuracy')
plt.plot(r_history.history['val_accuracy'], label = 'relu val_accuracy')
plt.plot(s_history.history['accuracy'], label='swish accuracy')
plt.plot(s_history.history['val_accuracy'], label = 'swish val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0, 1])
plt.legend(loc='lower right')
plt.figure(0)
r_test_loss, r_test_acc = r_model.evaluate(test_images, test_labels, verbose=2)
s_test_loss, s_test_acc = s_model.evaluate(test_images, test_labels, verbose=2)
plt.savefig('cnn_cifar_10.png', bbox_inches='tight')
print(r_test_acc)
print(s_test_acc)
'''
Beta values
'''
# Collect the learned beta values and the weights feeding each Swish layer
# over several independent training runs of the Swish network.
swish1_beta = []
swish2_beta = []
swish3_beta = []
swish4_beta = []
swish1_preact = []
swish2_preact = []
swish3_preact = []
swish4_preact = []
n=range(5)
# One freshly initialized model per repetition.
for i in n:
    #reinitialize model
    s_model = models.Sequential()
    s_model.add(layers.Conv2D(32, (3, 3), input_shape=(32, 32, 3)))
    s_model.add(Swish(beta=1.0, trainable=True,name='swish1'))
    s_model.add(layers.MaxPooling2D((2, 2)))
    s_model.add(layers.Conv2D(64, (3, 3)))
    s_model.add(Swish(beta=1.0, trainable=True,name='swish2'))
    s_model.add(layers.MaxPooling2D((2, 2)))
    s_model.add(layers.Conv2D(64, (3, 3)))
    s_model.add(Swish(beta=1.0, trainable=True,name='swish3'))
    s_model.add(layers.Flatten())
    s_model.add(layers.Dense(64))
    s_model.add(Swish(beta=1.0, trainable=True,name='swish4'))
    s_model.add(layers.Dense(10))
    s_model.compile(optimizer='adam',
                    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                    metrics=['accuracy'])
    s_history = s_model.fit(train_images, train_labels, epochs=5,
                            validation_data=(test_images, test_labels))
    #append results of beta and preactivations
    # NOTE(review): get_layer(index=...).get_weights()[0] returns the KERNEL
    # weights of the layer feeding each Swish, not activation values — confirm
    # that "preactivations" is the intended interpretation.
    swish1_beta.append(s_model.get_layer(name = 'swish1').get_weights())
    swish2_beta.append(s_model.get_layer(name = 'swish2').get_weights())
    swish3_beta.append(s_model.get_layer(name = 'swish3').get_weights())
    swish4_beta.append(s_model.get_layer(name = 'swish4').get_weights())
    swish1_preact.append(s_model.get_layer(index = 0).get_weights()[0].tolist())
    swish2_preact.append(s_model.get_layer(index = 3).get_weights()[0].tolist())
    swish3_preact.append(s_model.get_layer(index = 6).get_weights()[0].tolist())
    swish4_preact.append(s_model.get_layer(index = 9).get_weights()[0].tolist())
    # NOTE(review): this increment is a no-op across iterations — the for loop
    # rebinds i each pass; the print merely shows 1..5 as a progress counter.
    i += 1
    print(i)
def flatten(l):
    """Yield the leaves of an arbitrarily nested iterable in order.

    Strings and bytes are treated as atomic leaves, not as iterables.
    """
    for item in l:
        is_branch = isinstance(item, Iterable) and not isinstance(item, (str, bytes))
        if is_branch:
            yield from flatten(item)
        else:
            yield item
# Histogram the learned betas and the weights feeding each Swish layer.
bins_beta = np.arange(0, 3, 0.1)
bins_preact = np.arange(-5, 5, 0.1)
swish1_beta = list(flatten(swish1_beta))
swish2_beta = list(flatten(swish2_beta))
swish3_beta = list(flatten(swish3_beta))
swish4_beta = list(flatten(swish4_beta))
swish1_preact = list(flatten(swish1_preact))
swish2_preact = list(flatten(swish2_preact))
swish3_preact = list(flatten(swish3_preact))
swish4_preact = list(flatten(swish4_preact))
def _plot_histogram(fig_num, values, bins, title, out_path):
    """Draw one frequency histogram on figure *fig_num* and save it to *out_path*.

    Fixes two defects of the original copy-pasted chunks:
    * the figure is selected BEFORE drawing, so savefig captures the plot
      (previously plt.figure() ran after plt.hist, saving a blank image);
    * maxfreq comes from the counts returned by plt.hist (the original read
      n.max() where n was range(5), which raised AttributeError).
    """
    plt.figure(fig_num)
    counts, _, _ = plt.hist(x=values, bins=bins, alpha=0.7, rwidth=0.85)
    plt.grid(axis='y', alpha=0.75)
    plt.xlabel('Value')
    plt.ylabel('Frequency')
    plt.title(title)
    # Round the upper y-limit up to a clean multiple of 10.
    maxfreq = counts.max()
    # NOTE(review): the ymax keyword was removed from set_ylim in matplotlib
    # >= 3.3; on newer versions use top= instead.
    plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
    plt.savefig(out_path, bbox_inches='tight')
# NOTE(review): the original plotted only element [0] of each flattened list;
# kept as-is — confirm whether the full list was intended.
_plot_histogram(1, swish1_beta[0], bins_beta, 'Trained Betas - Swish Layer 1', 'cnn_cifar_10_beta1.png')
_plot_histogram(2, swish2_beta[0], bins_beta, 'Trained Betas - Swish Layer 2', 'cnn_cifar_10_beta2.png')
_plot_histogram(3, swish3_beta[0], bins_beta, 'Trained Betas - Swish Layer 3', 'cnn_cifar_10_beta3.png')
_plot_histogram(4, swish4_beta[0], bins_beta, 'Trained Betas - Swish Layer 4', 'cnn_cifar_10_beta4.png')
_plot_histogram(5, swish1_preact[0], bins_preact, 'Preactivations - Swish Layer 1', 'cnn_cifar_10_preact1.png')
_plot_histogram(6, swish2_preact[0], bins_preact, 'Preactivations - Swish Layer 2', 'cnn_cifar_10_preact2.png')
_plot_histogram(7, swish3_preact[0], bins_preact, 'Preactivations - Swish Layer 3', 'cnn_cifar_10_preact3.png')
_plot_histogram(8, swish4_preact[0], bins_preact, 'Preactivations - Swish Layer 4', 'cnn_cifar_10_preact4.png')
| [
"matplotlib.pyplot.title",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.figure",
"numpy.arange",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.backend.... | [((1498, 1526), 'tensorflow.keras.datasets.cifar10.load_data', 'datasets.cifar10.load_data', ([], {}), '()\n', (1524, 1526), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1767, 1795), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1777, 1795), True, 'import matplotlib.pyplot as plt\n'), ((2087, 2097), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2095, 2097), True, 'import matplotlib.pyplot as plt\n'), ((2129, 2148), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (2146, 2148), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2956, 2975), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (2973, 2975), False, 'from tensorflow.keras import datasets, layers, models\n'), ((3850, 3912), 'matplotlib.pyplot.plot', 'plt.plot', (["r_history.history['accuracy']"], {'label': '"""relu accuracy"""'}), "(r_history.history['accuracy'], label='relu accuracy')\n", (3858, 3912), True, 'import matplotlib.pyplot as plt\n'), ((3913, 3983), 'matplotlib.pyplot.plot', 'plt.plot', (["r_history.history['val_accuracy']"], {'label': '"""relu val_accuracy"""'}), "(r_history.history['val_accuracy'], label='relu val_accuracy')\n", (3921, 3983), True, 'import matplotlib.pyplot as plt\n'), ((3986, 4049), 'matplotlib.pyplot.plot', 'plt.plot', (["s_history.history['accuracy']"], {'label': '"""swish accuracy"""'}), "(s_history.history['accuracy'], label='swish accuracy')\n", (3994, 4049), True, 'import matplotlib.pyplot as plt\n'), ((4050, 4121), 'matplotlib.pyplot.plot', 'plt.plot', (["s_history.history['val_accuracy']"], {'label': '"""swish val_accuracy"""'}), "(s_history.history['val_accuracy'], label='swish val_accuracy')\n", (4058, 4121), True, 'import matplotlib.pyplot as plt\n'), ((4124, 4143), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (4134, 4143), True, 'import 
matplotlib.pyplot as plt\n'), ((4144, 4166), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (4154, 4166), True, 'import matplotlib.pyplot as plt\n'), ((4167, 4183), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (4175, 4183), True, 'import matplotlib.pyplot as plt\n'), ((4184, 4213), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4194, 4213), True, 'import matplotlib.pyplot as plt\n'), ((4214, 4227), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (4224, 4227), True, 'import matplotlib.pyplot as plt\n'), ((4392, 4444), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cnn_cifar_10.png"""'], {'bbox_inches': '"""tight"""'}), "('cnn_cifar_10.png', bbox_inches='tight')\n", (4403, 4444), True, 'import matplotlib.pyplot as plt\n'), ((6534, 6554), 'numpy.arange', 'np.arange', (['(0)', '(3)', '(0.1)'], {}), '(0, 3, 0.1)\n', (6543, 6554), True, 'import numpy as np\n'), ((6567, 6588), 'numpy.arange', 'np.arange', (['(-5)', '(5)', '(0.1)'], {}), '(-5, 5, 0.1)\n', (6576, 6588), True, 'import numpy as np\n'), ((6947, 7013), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'swish1_beta[0]', 'bins': 'bins_beta', 'alpha': '(0.7)', 'rwidth': '(0.85)'}), '(x=swish1_beta[0], bins=bins_beta, alpha=0.7, rwidth=0.85)\n', (6955, 7013), True, 'import matplotlib.pyplot as plt\n'), ((7014, 7044), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (7022, 7044), True, 'import matplotlib.pyplot as plt\n'), ((7045, 7064), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (7055, 7064), True, 'import matplotlib.pyplot as plt\n'), ((7065, 7088), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (7075, 7088), True, 'import matplotlib.pyplot as plt\n'), ((7089, 7131), 'matplotlib.pyplot.title', 'plt.title', (['"""Trained Betas - Swish 
Layer 1"""'], {}), "('Trained Betas - Swish Layer 1')\n", (7098, 7131), True, 'import matplotlib.pyplot as plt\n'), ((7260, 7273), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (7270, 7273), True, 'import matplotlib.pyplot as plt\n'), ((7275, 7333), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cnn_cifar_10_beta1.png"""'], {'bbox_inches': '"""tight"""'}), "('cnn_cifar_10_beta1.png', bbox_inches='tight')\n", (7286, 7333), True, 'import matplotlib.pyplot as plt\n'), ((7335, 7401), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'swish2_beta[0]', 'bins': 'bins_beta', 'alpha': '(0.7)', 'rwidth': '(0.85)'}), '(x=swish2_beta[0], bins=bins_beta, alpha=0.7, rwidth=0.85)\n', (7343, 7401), True, 'import matplotlib.pyplot as plt\n'), ((7402, 7432), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (7410, 7432), True, 'import matplotlib.pyplot as plt\n'), ((7433, 7452), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (7443, 7452), True, 'import matplotlib.pyplot as plt\n'), ((7453, 7476), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (7463, 7476), True, 'import matplotlib.pyplot as plt\n'), ((7477, 7519), 'matplotlib.pyplot.title', 'plt.title', (['"""Trained Betas - Swish Layer 2"""'], {}), "('Trained Betas - Swish Layer 2')\n", (7486, 7519), True, 'import matplotlib.pyplot as plt\n'), ((7648, 7661), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (7658, 7661), True, 'import matplotlib.pyplot as plt\n'), ((7663, 7721), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cnn_cifar_10_beta2.png"""'], {'bbox_inches': '"""tight"""'}), "('cnn_cifar_10_beta2.png', bbox_inches='tight')\n", (7674, 7721), True, 'import matplotlib.pyplot as plt\n'), ((7723, 7789), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'swish3_beta[0]', 'bins': 'bins_beta', 'alpha': '(0.7)', 'rwidth': '(0.85)'}), '(x=swish3_beta[0], 
bins=bins_beta, alpha=0.7, rwidth=0.85)\n', (7731, 7789), True, 'import matplotlib.pyplot as plt\n'), ((7790, 7820), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (7798, 7820), True, 'import matplotlib.pyplot as plt\n'), ((7821, 7840), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (7831, 7840), True, 'import matplotlib.pyplot as plt\n'), ((7841, 7864), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (7851, 7864), True, 'import matplotlib.pyplot as plt\n'), ((7865, 7907), 'matplotlib.pyplot.title', 'plt.title', (['"""Trained Betas - Swish Layer 3"""'], {}), "('Trained Betas - Swish Layer 3')\n", (7874, 7907), True, 'import matplotlib.pyplot as plt\n'), ((8036, 8049), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (8046, 8049), True, 'import matplotlib.pyplot as plt\n'), ((8051, 8109), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cnn_cifar_10_beta3.png"""'], {'bbox_inches': '"""tight"""'}), "('cnn_cifar_10_beta3.png', bbox_inches='tight')\n", (8062, 8109), True, 'import matplotlib.pyplot as plt\n'), ((8111, 8177), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'swish4_beta[0]', 'bins': 'bins_beta', 'alpha': '(0.7)', 'rwidth': '(0.85)'}), '(x=swish4_beta[0], bins=bins_beta, alpha=0.7, rwidth=0.85)\n', (8119, 8177), True, 'import matplotlib.pyplot as plt\n'), ((8178, 8208), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (8186, 8208), True, 'import matplotlib.pyplot as plt\n'), ((8209, 8228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (8219, 8228), True, 'import matplotlib.pyplot as plt\n'), ((8229, 8252), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (8239, 8252), True, 'import matplotlib.pyplot as plt\n'), ((8253, 8295), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Trained Betas - Swish Layer 4"""'], {}), "('Trained Betas - Swish Layer 4')\n", (8262, 8295), True, 'import matplotlib.pyplot as plt\n'), ((8424, 8437), 'matplotlib.pyplot.figure', 'plt.figure', (['(4)'], {}), '(4)\n', (8434, 8437), True, 'import matplotlib.pyplot as plt\n'), ((8439, 8497), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cnn_cifar_10_beta4.png"""'], {'bbox_inches': '"""tight"""'}), "('cnn_cifar_10_beta4.png', bbox_inches='tight')\n", (8450, 8497), True, 'import matplotlib.pyplot as plt\n'), ((8499, 8569), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'swish1_preact[0]', 'bins': 'bins_preact', 'alpha': '(0.7)', 'rwidth': '(0.85)'}), '(x=swish1_preact[0], bins=bins_preact, alpha=0.7, rwidth=0.85)\n', (8507, 8569), True, 'import matplotlib.pyplot as plt\n'), ((8570, 8600), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (8578, 8600), True, 'import matplotlib.pyplot as plt\n'), ((8601, 8620), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (8611, 8620), True, 'import matplotlib.pyplot as plt\n'), ((8621, 8644), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (8631, 8644), True, 'import matplotlib.pyplot as plt\n'), ((8645, 8688), 'matplotlib.pyplot.title', 'plt.title', (['"""Preactivations - Swish Layer 1"""'], {}), "('Preactivations - Swish Layer 1')\n", (8654, 8688), True, 'import matplotlib.pyplot as plt\n'), ((8817, 8830), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (8827, 8830), True, 'import matplotlib.pyplot as plt\n'), ((8832, 8892), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cnn_cifar_10_preact1.png"""'], {'bbox_inches': '"""tight"""'}), "('cnn_cifar_10_preact1.png', bbox_inches='tight')\n", (8843, 8892), True, 'import matplotlib.pyplot as plt\n'), ((8894, 8964), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'swish2_preact[0]', 'bins': 'bins_preact', 'alpha': '(0.7)', 
'rwidth': '(0.85)'}), '(x=swish2_preact[0], bins=bins_preact, alpha=0.7, rwidth=0.85)\n', (8902, 8964), True, 'import matplotlib.pyplot as plt\n'), ((8965, 8995), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (8973, 8995), True, 'import matplotlib.pyplot as plt\n'), ((8996, 9015), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (9006, 9015), True, 'import matplotlib.pyplot as plt\n'), ((9016, 9039), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (9026, 9039), True, 'import matplotlib.pyplot as plt\n'), ((9040, 9083), 'matplotlib.pyplot.title', 'plt.title', (['"""Preactivations - Swish Layer 2"""'], {}), "('Preactivations - Swish Layer 2')\n", (9049, 9083), True, 'import matplotlib.pyplot as plt\n'), ((9212, 9225), 'matplotlib.pyplot.figure', 'plt.figure', (['(6)'], {}), '(6)\n', (9222, 9225), True, 'import matplotlib.pyplot as plt\n'), ((9227, 9287), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cnn_cifar_10_preact2.png"""'], {'bbox_inches': '"""tight"""'}), "('cnn_cifar_10_preact2.png', bbox_inches='tight')\n", (9238, 9287), True, 'import matplotlib.pyplot as plt\n'), ((9289, 9359), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'swish3_preact[0]', 'bins': 'bins_preact', 'alpha': '(0.7)', 'rwidth': '(0.85)'}), '(x=swish3_preact[0], bins=bins_preact, alpha=0.7, rwidth=0.85)\n', (9297, 9359), True, 'import matplotlib.pyplot as plt\n'), ((9360, 9390), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (9368, 9390), True, 'import matplotlib.pyplot as plt\n'), ((9391, 9410), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (9401, 9410), True, 'import matplotlib.pyplot as plt\n'), ((9411, 9434), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (9421, 9434), True, 'import matplotlib.pyplot as 
plt\n'), ((9435, 9478), 'matplotlib.pyplot.title', 'plt.title', (['"""Preactivations - Swish Layer 3"""'], {}), "('Preactivations - Swish Layer 3')\n", (9444, 9478), True, 'import matplotlib.pyplot as plt\n'), ((9607, 9620), 'matplotlib.pyplot.figure', 'plt.figure', (['(7)'], {}), '(7)\n', (9617, 9620), True, 'import matplotlib.pyplot as plt\n'), ((9622, 9682), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cnn_cifar_10_preact3.png"""'], {'bbox_inches': '"""tight"""'}), "('cnn_cifar_10_preact3.png', bbox_inches='tight')\n", (9633, 9682), True, 'import matplotlib.pyplot as plt\n'), ((9684, 9754), 'matplotlib.pyplot.hist', 'plt.hist', ([], {'x': 'swish4_preact[0]', 'bins': 'bins_preact', 'alpha': '(0.7)', 'rwidth': '(0.85)'}), '(x=swish4_preact[0], bins=bins_preact, alpha=0.7, rwidth=0.85)\n', (9692, 9754), True, 'import matplotlib.pyplot as plt\n'), ((9755, 9785), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'alpha': '(0.75)'}), "(axis='y', alpha=0.75)\n", (9763, 9785), True, 'import matplotlib.pyplot as plt\n'), ((9786, 9805), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {}), "('Value')\n", (9796, 9805), True, 'import matplotlib.pyplot as plt\n'), ((9806, 9829), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (9816, 9829), True, 'import matplotlib.pyplot as plt\n'), ((9830, 9873), 'matplotlib.pyplot.title', 'plt.title', (['"""Preactivations - Swish Layer 4"""'], {}), "('Preactivations - Swish Layer 4')\n", (9839, 9873), True, 'import matplotlib.pyplot as plt\n'), ((10002, 10015), 'matplotlib.pyplot.figure', 'plt.figure', (['(8)'], {}), '(8)\n', (10012, 10015), True, 'import matplotlib.pyplot as plt\n'), ((10017, 10077), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cnn_cifar_10_preact4.png"""'], {'bbox_inches': '"""tight"""'}), "('cnn_cifar_10_preact4.png', bbox_inches='tight')\n", (10028, 10077), True, 'import matplotlib.pyplot as plt\n'), ((1819, 1843), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(5)', '(5)', '(i + 1)'], {}), '(5, 5, i + 1)\n', (1830, 1843), True, 'import matplotlib.pyplot as plt\n'), ((1844, 1858), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1854, 1858), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1877), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1873, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1897), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1890, 1897), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1949), 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_images[i]'], {'cmap': 'plt.cm.binary'}), '(train_images[i], cmap=plt.cm.binary)\n', (1912, 1949), True, 'import matplotlib.pyplot as plt\n'), ((2043, 2086), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['class_names[train_labels[i][0]]'], {}), '(class_names[train_labels[i][0]])\n', (2053, 2086), True, 'import matplotlib.pyplot as plt\n'), ((2161, 2211), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(3, 3)'], {'input_shape': '(32, 32, 3)'}), '(32, (3, 3), input_shape=(32, 32, 3))\n', (2174, 2211), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2225, 2250), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2242, 2250), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2264, 2291), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (2283, 2291), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2305, 2330), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (2318, 2330), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2344, 2369), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2361, 2369), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2383, 2410), 
'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (2402, 2410), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2424, 2449), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (2437, 2449), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2463, 2488), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2480, 2488), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2504, 2520), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2518, 2520), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2534, 2550), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {}), '(64)\n', (2546, 2550), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2564, 2589), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2581, 2589), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2604, 2620), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (2616, 2620), False, 'from tensorflow.keras import datasets, layers, models\n'), ((2988, 3038), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(3, 3)'], {'input_shape': '(32, 32, 3)'}), '(32, (3, 3), input_shape=(32, 32, 3))\n', (3001, 3038), False, 'from tensorflow.keras import datasets, layers, models\n'), ((3111, 3138), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (3130, 3138), False, 'from tensorflow.keras import datasets, layers, models\n'), ((3152, 3177), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (3165, 3177), False, 'from tensorflow.keras import datasets, layers, models\n'), ((3250, 3277), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', 
(3269, 3277), False, 'from tensorflow.keras import datasets, layers, models\n'), ((3291, 3316), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (3304, 3316), False, 'from tensorflow.keras import datasets, layers, models\n'), ((3392, 3408), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (3406, 3408), False, 'from tensorflow.keras import datasets, layers, models\n'), ((3422, 3438), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {}), '(64)\n', (3434, 3438), False, 'from tensorflow.keras import datasets, layers, models\n'), ((3512, 3528), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (3524, 3528), False, 'from tensorflow.keras import datasets, layers, models\n'), ((4715, 4734), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (4732, 4734), False, 'from tensorflow.keras import datasets, layers, models\n'), ((395, 414), 'tensorflow.keras.backend.sigmoid', 'K.sigmoid', (['(beta * x)'], {}), '(beta * x)\n', (404, 414), True, 'import tensorflow.keras.backend as K\n'), ((2695, 2758), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (2740, 2758), True, 'import tensorflow as tf\n'), ((3603, 3666), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (3648, 3666), True, 'import tensorflow as tf\n'), ((4751, 4801), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(3, 3)'], {'input_shape': '(32, 32, 3)'}), '(32, (3, 3), input_shape=(32, 32, 3))\n', (4764, 4801), False, 'from tensorflow.keras import datasets, layers, models\n'), ((4882, 4909), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (4901, 4909), False, 'from tensorflow.keras import datasets, layers, 
models\n'), ((4927, 4952), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (4940, 4952), False, 'from tensorflow.keras import datasets, layers, models\n'), ((5033, 5060), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (5052, 5060), False, 'from tensorflow.keras import datasets, layers, models\n'), ((5078, 5103), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (5091, 5103), False, 'from tensorflow.keras import datasets, layers, models\n'), ((5184, 5200), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (5198, 5200), False, 'from tensorflow.keras import datasets, layers, models\n'), ((5218, 5234), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {}), '(64)\n', (5230, 5234), False, 'from tensorflow.keras import datasets, layers, models\n'), ((5315, 5331), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (5327, 5331), False, 'from tensorflow.keras import datasets, layers, models\n'), ((5391, 5454), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (5436, 5454), True, 'import tensorflow as tf\n'), ((770, 780), 'tensorflow.keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (778, 780), True, 'import tensorflow.keras.backend as K\n'), ((7198, 7219), 'numpy.ceil', 'np.ceil', (['(maxfreq / 10)'], {}), '(maxfreq / 10)\n', (7205, 7219), True, 'import numpy as np\n'), ((7586, 7607), 'numpy.ceil', 'np.ceil', (['(maxfreq / 10)'], {}), '(maxfreq / 10)\n', (7593, 7607), True, 'import numpy as np\n'), ((7974, 7995), 'numpy.ceil', 'np.ceil', (['(maxfreq / 10)'], {}), '(maxfreq / 10)\n', (7981, 7995), True, 'import numpy as np\n'), ((8362, 8383), 'numpy.ceil', 'np.ceil', (['(maxfreq / 10)'], {}), '(maxfreq / 10)\n', (8369, 8383), True, 'import numpy as np\n'), ((8755, 8776), 
'numpy.ceil', 'np.ceil', (['(maxfreq / 10)'], {}), '(maxfreq / 10)\n', (8762, 8776), True, 'import numpy as np\n'), ((9150, 9171), 'numpy.ceil', 'np.ceil', (['(maxfreq / 10)'], {}), '(maxfreq / 10)\n', (9157, 9171), True, 'import numpy as np\n'), ((9545, 9566), 'numpy.ceil', 'np.ceil', (['(maxfreq / 10)'], {}), '(maxfreq / 10)\n', (9552, 9566), True, 'import numpy as np\n'), ((9940, 9961), 'numpy.ceil', 'np.ceil', (['(maxfreq / 10)'], {}), '(maxfreq / 10)\n', (9947, 9961), True, 'import numpy as np\n')] |
# import clingo
import numpy as np
import copy
import random
#
# import torch
# from torch import nn
# import torch.nn.functional as F
# from torch import optim
import math
from HierarchicalAgentFlatQ import *
import pandas as pd
class Recursive_Maze():
    """Hierarchical grid-maze environment.

    A stack of ``n_layers`` "manager" levels observes the maze at
    progressively coarser resolutions (each level down-scales coordinates by
    ``manager_view``); upper levels set sub-goal cells for the level below,
    and the bottom level moves the agent in the actual maze grid.

    NOTE(review): the meaning of the integer codes stored in
    ``reset_reward`` (0..5) is inferred from ``get_reward`` below:
    0 = step/wall penalty, 1 = task satisfied, 2 = search limit breached,
    3 = task failed, 4 = wall, 5 = episode done.  Confirm against the
    training loop in HierarchicalAgentFlatQ.
    """
    def __init__(self, shape_maze,simple,manager_layers,man_view,search_clause=False):
        """Build the maze, place agent/goal, and initialise the hierarchy.

        :param shape_maze: side length of the square maze grid
        :param simple: reward-structure selector (1, 2 or 3 — see get_reward)
        :param manager_layers: number of manager levels above the agent level
        :param man_view: down-scaling factor between consecutive levels
        :param search_clause: if True, managers co-located with the goal may
            only issue the 'search' action (see get_possible_actions)
        """
        self.reward_structure=simple
        self.n_layers = manager_layers
        self.manager_view=man_view
        self.search_clause=search_clause
        self.dim_maze = shape_maze
        self.generate_maze(self.dim_maze, simple)
        # self.goal_state()
        self.initiate_goal_state()
        self.initiate_states()
        self.loc = self.agent_init_state
        # ns/na: state- and action-space sizes at the finest level.
        self.ns = self.maze.shape[0] ** 2
        self.na = 4
        self.init_maze_hierarchy()
        self.current_level = 0
        self.current_tasks_loc = copy.copy(self.super_managers)
        self.tasks=[4 for x in self.current_tasks_loc]
        self.hierarchy_actions = [4 for x in range(int(self.n_layers))]
        # need to keep track if we are in the right location according to our super manager
        self.tasks_bools = np.ones(len(self.current_tasks_loc))
        self.lims = self.get_super_manager_1([self.maze.shape[1], self.maze.shape[1]])
        # Per-level search budgets: steps allowed before 'SearchLimit' fires.
        self.search_lims = [4*x[0] for x in self.lims][::-1]
        # self.search_lims = [12000 for x in self.lims][::-1]
        # self.state_visit=np.zer
        self.search_lims[-1]=np.maximum(4,self.search_lims[-1])
        self.current_state=self.super_managers[self.current_level]
        # print(self.search_lims)
        self.reward_per_level = [0 for x in range(int(self.n_layers+1))]
        self.reset_reward= [0 for x in range(int(self.n_layers+1))]
        self.check_dicts={}
        self.expected_level=0
        self.check_dicts[0] = {}
        # self.check_dicts[0]=[0,0]
        for i in range(int(self.n_layers+1)):
            self.check_dicts[0][i]={0:0,1:0,2:0,3:0,4:0,5:0}
        # print(self.maze)
        # print(self.loc)
        # Agent start and goal must lie on free cells (maze value 0).
        assert self.maze[int(self.loc[0]), int(self.loc[1])] == 0
        assert self.maze[int(self.agent_init_state[0]), int(self.agent_init_state[1])] == 0
    def get_super_manager(self):
        """Return the agent's coarse coordinates at every manager level,
        ordered coarsest-first (apparently superseded by get_super_manager_1)."""
        super_managers = []
        number_of_levels = self.n_layers
        super_managers.append([np.floor(x / self.manager_view) for x in self.loc])
        if number_of_levels - 1 > 1:
            for i in range(int(number_of_levels - 1)):
                super_managers.append([np.floor(x / self.manager_view) for x in super_managers[-1]])
                # print(i)
        else:
            super_managers.append([0, 0])
        return super_managers[::-1]
    def initiate_goal_state(self):
        """Pick a uniformly random free cell as the goal location."""
        empty_cells = [[x, y] for x, y in zip(np.where(self.maze == 0)[0], np.where(self.maze == 0)[1])]
        self.goal_init_state=random.choice(empty_cells)
    def init_super_manager(self):
        """Recompute ``self.super_managers``: the agent's coarse coordinates
        at each level, ordered coarsest-first."""
        n_layers = self.n_layers + 1
        self.super_managers = []
        number_of_levels = n_layers
        self.super_managers.append([np.floor(x / self.manager_view) for x in self.loc])
        if number_of_levels - 2 > 1:
            for i in range(int(number_of_levels - 2)):
                self.super_managers.append([np.floor(x / self.manager_view) for x in self.super_managers[-1]])
        else:
            self.super_managers.append([0, 0])
        self.super_managers = self.super_managers[::-1]
    def init_maze_hierarchy(self):
        """Initialise manager positions and the goal's coarse coordinates
        (``self.goal_locs``) at every level, ordered coarsest-first."""
        # nlayers = math.log(self.maze.shape[0], 2)
        self.init_super_manager()
        number_of_levels=self.n_layers + 1
        self.goal_locs = []
        self.goal_locs.append([np.floor(x / self.manager_view) for x in self.goal_init_state])
        if number_of_levels - 2 > 1:
            for i in range(int(number_of_levels - 2)):
                self.goal_locs.append([np.floor(x / self.manager_view) for x in self.goal_locs[-1]])
        else:
            self.goal_locs.append([0, 0])
        self.goal_locs = self.goal_locs[::-1]
    def initiate_states(self):
        """Pick a random free cell as the agent start; retry (recursively)
        if it coincides with the goal cell."""
        empty_cells = [[x, y] for x, y in zip(np.where(self.maze == 0)[0], np.where(self.maze == 0)[1])]
        self.agent_init_state = random.choice(empty_cells)
        if self.agent_init_state==self.goal_init_state:
            print('Reinitializing state')
            self.initiate_states()
        # print(agent_init_state)
    def reset_rewards_after_learning(self,old_reset):
        """Zero the accumulated reward of every level whose reset code in
        ``old_reset`` is non-zero (i.e. whose sub-episode just ended)."""
        for i,x in enumerate(old_reset):
            if x !=0:
                self.reward_per_level[i] = 0
            # if i!=self.n_layers:
            #     self.tasks_bools[i]=1
    # def check_if_manager_change(self, current_level,new_state,old_state,d):
    def check_if_task_satisfied_at_senior_level(self, current_level,new_state,old_state,d):
        """Recursively propagate task outcomes up the hierarchy after a move
        from ``old_state`` to ``new_state``.

        Sets per-level ``reset_reward`` codes (1 = satisfied, 3 = failed,
        5 = done when ``d`` is True) and updates ``tasks_bools`` /
        ``expected_level``.  NOTE(review): recursion terminates because
        ``current_level`` strictly decreases and level 0 falls to the final
        else branch.
        """
        locs = self.get_super_manager_1(self.loc)
        locs1 = self.get_super_manager_1(old_state)
        locs2 = self.get_super_manager_1(new_state)
        # print('Expected level 0', self.expected_level)
        if current_level != 0:
            # Agent sits in the cell the parent manager asked for.
            if self.current_tasks_loc[current_level - 1] == locs[current_level - 1]:
                # Only count it once the coarse cell actually changed.
                if locs1[current_level-1]!=locs2[current_level-1]:
                    if self.tasks_bools[current_level - 1] != 1:
                        if self.hierarchy_actions[current_level-1]==4:
                            if d:
                                print('task satisfied')
                                self.reset_reward[current_level - 1] = 5
                                self.reset_reward[current_level] = 5
                            else:
                                self.reset_reward[current_level - 1] = 0
                                self.reset_reward[current_level] = 0
                        else:
                            print('stask satisfied')
                            # print('level',current_level)
                            # print('moving from', old_state)
                            # print('movign to ', new_state)
                            # print('task', self.hierarchy_actions[current_level - 1])
                            # print('current taks ', self.current_tasks_loc[current_level-1])
                            # print('task satisfied at level',current_level-1)
                            self.reset_reward[current_level-1]=1
                            self.reset_reward[current_level]=1
                        self.tasks_bools[current_level - 1] = 1
                        self.expected_level = np.maximum(self.expected_level - 1,0)
                        # print('Current level',current_level)
                        current_level = current_level - 1
                        # print('Expected level',self.expected_level)
                        self.check_if_task_satisfied_at_senior_level( current_level,new_state,old_state,d)
                    else:
                        self.expected_level=current_level
                        # print('Expected level TB', self.expected_level)
                else:
                    self.expected_level = current_level
            elif locs1 != locs2:
                # Coarse cell changed but we are NOT where the parent wanted:
                # the delegated task failed (code 3).
                if self.hierarchy_actions[current_level - 1] != 4:
                    print('moving from', old_state)
                    print('movign to ', new_state)
                    print('task',self.hierarchy_actions[current_level-1])
                    print('change managerial level', current_level - 1)
                    # if d:
                    #     self.reset_reward[current_level - 1] = 5
                    #     self.reset_reward[current_level] = 5
                    # else:
                    # print('TaskFailed')
                    self.reset_reward[current_level - 1] = 3
                    self.reset_reward[current_level] = 3
                    self.expected_level = np.maximum(self.expected_level - 1, 0)
                    # print('EL', self.expected_level)
                    current_level = current_level - 1
                    # self.tasks_bools[current_level - 1]=1
                    self.check_if_task_satisfied_at_senior_level(current_level, new_state, old_state, d)
                else:
                    print('moving from', old_state)
                    print('movign to ', new_state)
                    print('task', self.hierarchy_actions[current_level - 1])
                    print('change managerial level', current_level - 1)
                    # if d:
                    #     self.reset_reward[current_level - 1] = 5
                    #     self.reset_reward[current_level] = 5
                    # else:
                    # print('TaskFailed')
                    self.reset_reward[current_level - 1] = 3
                    self.reset_reward[current_level] = 3
                    self.expected_level = np.maximum(self.expected_level - 1, 0)
                    # print('EL',self.expected_level)
                    current_level = current_level - 1
                    # self.tasks_bools[current_level - 1] = 1
                    self.check_if_task_satisfied_at_senior_level(current_level, new_state, old_state, d)
        else:
            # print('no change')
            self.expected_level=current_level
            # print('EL', self.expected_level)
    def check_if_search_limit_breached(self, current_level,d):
        """Recursively flag levels whose accumulated (negative) reward has
        exhausted their search budget: code 5 if the episode is done (``d``),
        code 2 otherwise."""
        if current_level - 1 >= 0:
            if abs(self.reward_per_level[current_level - 1]) >= self.search_lims[current_level - 1]:
                print('SearchLimitBreached')
                if d:
                    self.reset_reward[current_level - 1] = 5
                    self.reset_reward[current_level] = 5
                else:
                    self.reset_reward[current_level - 1] = 2
                    self.reset_reward[current_level] = 2
                self.expected_level = np.maximum(self.expected_level - 1,0)
                # self.reward_per_level[current_level] = 0
                # self.reward_per_level[self.current_level] = 0
                # print('limit breached', current_level - 1)
                # self.tasks_bools[current_level - 1] = 1
                current_level = current_level - 1
                if current_level!=0:
                    self.check_if_search_limit_breached( current_level,d)
        # else:
        #     self.expected_level=current_level
        #     print('EL5', self.expected_level)
    def checks(self,current_level,new_state,old_state,d):
        """Run the search-limit check for every level from ``current_level``
        down to 1 (new_state/old_state are currently unused here)."""
        for l in range(current_level, 0, -1):
            self.check_if_search_limit_breached( l,d)
    def get_possible_actions(self,current_loc,current_level):
        """Return the legal actions at ``current_level`` for an agent at
        ``current_loc``: 0..3 are N/S/E/W within that level's grid bounds,
        4 delegates/searches.  With ``search_clause`` a manager co-located
        with the goal's coarse cell may only search."""
        super_goals=copy.copy(self.get_super_manager_1(self.goal_init_state))
        current_man=copy.copy(self.get_super_manager_1(current_loc))
        if current_level!=self.n_layers:
            lim = self.lims[current_level][0]
            if current_level==0:
                self.possible_actions=[4]
            else:
                current_loc = current_man[current_level]
                if self.search_clause:
                    if super_goals[current_level]==current_man[current_level]:
                        self.possible_actions= [4]
                    else:
                        self.possible_actions= []
                        if current_loc[0] - 1 >= 0:
                            self.possible_actions.append(0)
                        if current_loc[0] + 1 <= lim - 1:
                            self.possible_actions.append(1)
                        if current_loc[1] + 1 <= lim - 1:
                            self.possible_actions.append(2)
                        if current_loc[1] - 1 >= 0:
                            self.possible_actions.append(3)
                else:
                    self.possible_actions = []
                    if current_loc[0] - 1 >= 0:
                        self.possible_actions.append(0)
                    if current_loc[0] + 1 <= lim - 1:
                        self.possible_actions.append(1)
                    if current_loc[1] + 1 <= lim - 1:
                        self.possible_actions.append(2)
                    if current_loc[1] - 1 >= 0:
                        self.possible_actions.append(3)
                    self.possible_actions.append(4)
        else:
            # self.possible_actions=[]
            # if current_loc[0]-1>=0:
            #     self.possible_actions.append(0)
            # if current_loc[0]+1<= self.maze.shape[0] - 1:
            #     self.possible_actions.append(1)
            # if current_loc[1]+1<= self.maze.shape[0] - 1:
            #     self.possible_actions.append(2)
            # if current_loc[1]-1>=0:
            #     self.possible_actions.append(3)
            self.possible_actions = [0,1,2,3]
        return self.possible_actions
    def step(self, action,steps):
        """Apply ``action`` at the currently active level.

        At level 0 and intermediate levels actions 0-3 set a sub-goal cell
        for the level below and 4 delegates in place; at the bottom level
        actions 0-3 move the agent (walls/bounds leave it in place and set
        reset code 0).  Returns
        ``(loc, current_level, current_state, done, {})``.
        """
        current_loc = copy.copy(self.loc)
        current_level = copy.copy(self.current_level)
        if current_level == 0:
            # action=5
            self.hierarchy_actions[current_level] = 4
            self.current_level = current_level + 1
        elif current_level == self.n_layers:
            assert action<=3
            # 0,1,2,3,4 --> NSEW*
            if action == 0:
                new_row = int(self.loc[0] - 1)
                new_col = int(self.loc[1])
                if new_row >= 0:
                    if self.maze[new_row][new_col] != 1:
                        self.loc = [new_row, new_col]
                    else:
                        self.reset_reward[current_level] = 0
                else:
                    self.reset_reward[current_level]=0
            if action == 1:
                new_row = int(self.loc[0] + 1)
                new_col = int(self.loc[1])
                if new_row <= self.maze.shape[0] - 1:
                    if self.maze[new_row][new_col] != 1:
                        self.loc = [new_row, new_col]
                    else:
                        self.reset_reward[current_level] = 0
                else:
                    self.reset_reward[current_level]=0
            if action == 2:
                new_row = int(self.loc[0])
                new_col = int(self.loc[1] + 1)
                if new_col <= self.maze.shape[0] - 1:
                    if self.maze[new_row][new_col] != 1:
                        self.loc = [new_row, new_col]
                    else:
                        self.reset_reward[current_level] = 0
                else:
                    self.reset_reward[current_level]=0
            if action == 3:
                new_row = int(self.loc[0])
                new_col = int(self.loc[1] - 1)
                if new_col >= 0:
                    if self.maze[new_row][new_col] != 1:
                        self.loc = [new_row, new_col]
                    else:
                        self.reset_reward[current_level] = 0
                else:
                    self.reset_reward[current_level]=0
        else:
            # self.hierarchy_actions[current_level] = action
            current_locs = self.get_super_manager_1(self.loc)[current_level]
            lim = self.lims[current_level][0]
            if action == 0:
                new_row = int(current_locs[0] - 1)
                new_col = int(current_locs[1])
                if new_row >= 0:
                    self.current_tasks_loc[current_level] = [new_row, new_col]
                    # indicates new task set
                    self.tasks_bools[current_level] = 0
                    self.hierarchy_actions[current_level] = action
                    self.current_level = current_level + 1
                else:
                    self.reset_reward[current_level]=0
            if action == 1:
                new_row = int(current_locs[0] + 1)
                new_col = int(current_locs[1])
                if new_row < lim:
                    self.current_tasks_loc[current_level] = [new_row, new_col]
                    self.tasks_bools[current_level] = 0
                    self.hierarchy_actions[current_level] = action
                    self.current_level = current_level + 1
                else:
                    self.reset_reward[current_level]=0
            if action == 2:
                new_row = int(current_locs[0])
                new_col = int(current_locs[1] + 1)
                if new_col < lim:
                    self.current_tasks_loc[current_level] = [new_row, new_col]
                    self.tasks_bools[current_level] = 0
                    self.hierarchy_actions[current_level] = action
                    self.current_level = current_level + 1
                else:
                    self.reset_reward[current_level]=0
            if action == 3:
                new_row = int(current_locs[0])
                new_col = int(current_locs[1] - 1)
                if new_col >= 0:
                    self.current_tasks_loc[current_level] = [new_row, new_col]
                    self.tasks_bools[current_level] = 0
                    self.hierarchy_actions[current_level] = action
                    self.current_level = current_level + 1
                else:
                    self.reset_reward[current_level]=0
            if action == 4:
                new_row = int(current_locs[0])
                new_col = int(current_locs[1] )
                self.current_tasks_loc[current_level] = [new_row, new_col]
                self.hierarchy_actions[current_level] = action
                self.current_level = current_level + 1
        if self.goal_init_state == self.loc:
            done = True
        else:
            done = False
        if done==True:
            self.reset_reward=[5 for x in self.reset_reward]
        if self.current_level!=self.n_layers:
            self.current_state=self.get_super_manager_1(self.loc)[self.current_level]
        else:
            self.current_state=self.loc
        return self.loc, self.current_level,self.current_state, done, {}
    def reset(self):
        """Start a new episode: re-sample the agent start, rebuild manager
        positions, and clear all per-level bookkeeping.  Returns the start
        location."""
        self.initiate_states()
        self.loc = self.agent_init_state
        self.init_super_manager()
        self.reward_per_level = [0 for x in self.reward_per_level]
        self.reset_rewards_after_learning([0,0,0])
        self.steps_max=0
        self.current_level = 0
        self.current_tasks_loc = copy.copy(self.super_managers)
        self.tasks = [4 for x in self.current_tasks_loc]
        self.hierarchy_actions = [4 for x in range(int(self.n_layers))]
        # need to keep track if we are in the right location according to our super manager
        self.tasks_bools = np.ones(len(self.current_tasks_loc))
        self.reset_reward = [0 for x in range(int(self.n_layers + 1))]
        self.reward_per_level = [0 for x in range(int(self.n_layers + 1))]
        self.reset_reward = [0 for x in range(int(self.n_layers + 1))]
        return self.loc
    # reset state to something
    # return None
    def get_reward(self,steps):
        """Convert the per-level ``reset_reward`` codes into numeric rewards.

        The three reward dictionaries are selected by ``reward_structure``
        (set from ``simple`` in __init__).  Returns
        ``(reward_per_level, done)``; ``done`` is True only when the agent
        stands on the goal at the bottom level.
        """
        done = False
        reward_dict={}
        if self.reward_structure==1:
            reward_dict['DoneAndSearch']=0
            reward_dict['DoneAndNoSearch'] = 0
            reward_dict['TaskFailed'] = -10
            reward_dict['SearchLimit'] = -1
            reward_dict['TaskSatisfied'] = 0
            reward_dict['Wall'] = -1
        elif self.reward_structure==2:
            reward_dict['DoneAndSearch'] = 1
            reward_dict['DoneAndNoSearch'] = 0
            reward_dict['TaskFailed'] = -10
            reward_dict['SearchLimit'] = -1
            reward_dict['TaskSatisfied'] = 0
            reward_dict['Wall'] = -1
        elif self.reward_structure==3:
            reward_dict['DoneAndSearch'] = 100
            reward_dict['DoneAndNoSearch'] = 0
            reward_dict['TaskFailed'] = -10
            reward_dict['SearchLimit'] = -1
            reward_dict['TaskSatisfied'] = 10
            reward_dict['Wall'] = -1
        if self.current_level == self.n_layers:
            # if find goal but manager didn't say search don't reward
            if self.loc == self.goal_init_state:
                for i in range(int(self.n_layers)):
                    x = self.reward_per_level[i]
                    if self.hierarchy_actions[i]==4:
                        # if self.tasks_bools[i]!=1:
                        self.reward_per_level[i] = reward_dict['DoneAndSearch']
                        # /max(1,np.abs(x))
                        self.reward_per_level[-1] = reward_dict['DoneAndSearch']
                        # if i==1:
                        #     print('Searching and finding ')
                        #     print('Level:', i)
                        #     print('Rewards',self.reward_per_level)
                        #     print('Tasks',self.hierarchy_actions)
                    else:
                        x = self.reward_per_level[i]
                        self.reward_per_level[i] = reward_dict['DoneAndNoSearch']
                        self.reward_per_level[-1] = reward_dict['DoneAndNoSearch']
                        # print('Not Searching and finding ')
                        # print('Level:', i)
                        # print('Rewards', self.reward_per_level)
                        # print('Tasks', self.hierarchy_actions)
                # self.maze.shape[0]
                print('Goal Found')
                done = True
            else:
                if steps>0:
                    # print(self.reset_reward)
                    # print('....')
                    for i in range(int(self.n_layers)):
                        if self.reset_reward[i]==0:
                            x=self.reward_per_level[i]
                            self.reward_per_level[i] = x-1
                            # '/max(np.abs(x),1)
                            self.reward_per_level[-1]=-1
                        # if task satisfied 0
                        elif self.reset_reward[i]==1:
                            x = self.reward_per_level[i]
                            self.reward_per_level[i] = x+reward_dict['TaskSatisfied']
                            # +self.maze.shape[0]/4
                            self.reward_per_level[-1] = reward_dict['TaskSatisfied']
                        elif self.reset_reward[i]==2:
                            x = self.reward_per_level[i]
                            self.reward_per_level[i] = x +reward_dict['SearchLimit']
                            # /max(np.abs(x),1)
                            self.reward_per_level[-1] = +reward_dict['SearchLimit']
                        # if manager changed wrong -10
                        elif self.reset_reward[i] == 3:
                            x = self.reward_per_level[i]
                            self.reward_per_level[i] = x + reward_dict['TaskFailed']
                            # /max(np.abs(x),1)
                            self.reward_per_level[-1] = +reward_dict['TaskFailed']
                            # self.maze.shape[0]/4
                        # if wall
                        elif self.reset_reward[i] == 4:
                            x = self.reward_per_level[i]
                            self.reward_per_level[i] = x + reward_dict['Wall']
                            # /max(np.abs(x),1)
                            self.reward_per_level[-1] = +reward_dict['Wall']
                        self.reset_reward[i]=0
                    self.reset_reward[-1] = 0
        # else:
        #     reward=self.reward_per_level
        #     reward[-1]=0
        reward = self.reward_per_level
        # if reward[1] > 0:
        #     print('ere')
        return reward, done
    def get_super_manager_1(self, loc):
        """Return ``loc``'s coarse coordinates at every manager level,
        ordered coarsest-first (each level divides by ``manager_view``)."""
        # find which super manager per finest location state
        super_managers = []
        number_of_levels = int(self.n_layers)
        # super_managers.append([np.floor(x/(2**(number_of_levels-1)/2)) for x in current_state])
        # print(loc)
        super_managers.append([np.floor(x / self.manager_view) for x in loc])
        if number_of_levels - 1 > 1:
            for i in range(number_of_levels - 1):
                super_managers.append([np.floor(x / self.manager_view) for x in super_managers[-1]])
                # print(i)
        else:
            super_managers.append([0, 0])
        return super_managers[::-1]
    def generate_maze(self, dim_maze=8, opaque=True):
        """Create an empty (all-free) square maze grid; ``opaque`` is
        currently unused in this visible implementation."""
        self.maze=np.zeros((dim_maze,dim_maze))
| [
"numpy.maximum",
"numpy.floor",
"numpy.zeros",
"random.choice",
"copy.copy",
"numpy.where"
] | [((912, 942), 'copy.copy', 'copy.copy', (['self.super_managers'], {}), '(self.super_managers)\n', (921, 942), False, 'import copy\n'), ((1516, 1551), 'numpy.maximum', 'np.maximum', (['(4)', 'self.search_lims[-1]'], {}), '(4, self.search_lims[-1])\n', (1526, 1551), True, 'import numpy as np\n'), ((2946, 2972), 'random.choice', 'random.choice', (['empty_cells'], {}), '(empty_cells)\n', (2959, 2972), False, 'import random\n'), ((4303, 4329), 'random.choice', 'random.choice', (['empty_cells'], {}), '(empty_cells)\n', (4316, 4329), False, 'import random\n'), ((13257, 13276), 'copy.copy', 'copy.copy', (['self.loc'], {}), '(self.loc)\n', (13266, 13276), False, 'import copy\n'), ((13302, 13331), 'copy.copy', 'copy.copy', (['self.current_level'], {}), '(self.current_level)\n', (13311, 13331), False, 'import copy\n'), ((18833, 18863), 'copy.copy', 'copy.copy', (['self.super_managers'], {}), '(self.super_managers)\n', (18842, 18863), False, 'import copy\n'), ((25295, 25325), 'numpy.zeros', 'np.zeros', (['(dim_maze, dim_maze)'], {}), '((dim_maze, dim_maze))\n', (25303, 25325), True, 'import numpy as np\n'), ((2393, 2424), 'numpy.floor', 'np.floor', (['(x / self.manager_view)'], {}), '(x / self.manager_view)\n', (2401, 2424), True, 'import numpy as np\n'), ((3156, 3187), 'numpy.floor', 'np.floor', (['(x / self.manager_view)'], {}), '(x / self.manager_view)\n', (3164, 3187), True, 'import numpy as np\n'), ((3765, 3796), 'numpy.floor', 'np.floor', (['(x / self.manager_view)'], {}), '(x / self.manager_view)\n', (3773, 3796), True, 'import numpy as np\n'), ((10131, 10169), 'numpy.maximum', 'np.maximum', (['(self.expected_level - 1)', '(0)'], {}), '(self.expected_level - 1, 0)\n', (10141, 10169), True, 'import numpy as np\n'), ((24850, 24881), 'numpy.floor', 'np.floor', (['(x / self.manager_view)'], {}), '(x / self.manager_view)\n', (24858, 24881), True, 'import numpy as np\n'), ((2579, 2610), 'numpy.floor', 'np.floor', (['(x / self.manager_view)'], {}), '(x / 
self.manager_view)\n', (2587, 2610), True, 'import numpy as np\n'), ((2857, 2881), 'numpy.where', 'np.where', (['(self.maze == 0)'], {}), '(self.maze == 0)\n', (2865, 2881), True, 'import numpy as np\n'), ((2886, 2910), 'numpy.where', 'np.where', (['(self.maze == 0)'], {}), '(self.maze == 0)\n', (2894, 2910), True, 'import numpy as np\n'), ((3347, 3378), 'numpy.floor', 'np.floor', (['(x / self.manager_view)'], {}), '(x / self.manager_view)\n', (3355, 3378), True, 'import numpy as np\n'), ((3963, 3994), 'numpy.floor', 'np.floor', (['(x / self.manager_view)'], {}), '(x / self.manager_view)\n', (3971, 3994), True, 'import numpy as np\n'), ((4211, 4235), 'numpy.where', 'np.where', (['(self.maze == 0)'], {}), '(self.maze == 0)\n', (4219, 4235), True, 'import numpy as np\n'), ((4240, 4264), 'numpy.where', 'np.where', (['(self.maze == 0)'], {}), '(self.maze == 0)\n', (4248, 4264), True, 'import numpy as np\n'), ((6678, 6716), 'numpy.maximum', 'np.maximum', (['(self.expected_level - 1)', '(0)'], {}), '(self.expected_level - 1, 0)\n', (6688, 6716), True, 'import numpy as np\n'), ((8066, 8104), 'numpy.maximum', 'np.maximum', (['(self.expected_level - 1)', '(0)'], {}), '(self.expected_level - 1, 0)\n', (8076, 8104), True, 'import numpy as np\n'), ((9076, 9114), 'numpy.maximum', 'np.maximum', (['(self.expected_level - 1)', '(0)'], {}), '(self.expected_level - 1, 0)\n', (9086, 9114), True, 'import numpy as np\n'), ((25026, 25057), 'numpy.floor', 'np.floor', (['(x / self.manager_view)'], {}), '(x / self.manager_view)\n', (25034, 25057), True, 'import numpy as np\n')] |
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import pandas as pd
from numpy import nan
from numpy.random import randn
from legate import pandas as lp
# Reduction operators exercised against both pandas and legate.pandas.
ops = ["sum", "prod", "count", "min", "max", "var", "std", "mean"]
print("##### Testing normal inputs #####")
for n in [10, 100]:
    series_pd = pd.Series(randn(1, n)[0])
    # Null out every fourth element so null handling is exercised too.
    for idx in range(n):
        if (idx + 1) % 4 == 0:
            series_pd[idx] = nan
    series_lp = lp.Series(series_pd)
    for op in ops:
        print("Testing " + op)
        reference = getattr(pd.Series, op)(series_pd)
        result = getattr(lp.Series, op)(series_lp)
        # Both implementations must agree to near machine precision.
        assert math.fabs(reference - result) < 1e-14
# Build a series that is entirely null.
series_pd = pd.Series(randn(1, 10)[0])
for idx in range(10):
    series_pd[idx] = nan
series_lp = lp.Series(series_pd)
print("##### Testing all-null case #####")
for op in ops[3:]:
    print("Testing " + op)
    reference = getattr(pd.Series, op)(series_pd)
    result = getattr(lp.Series, op)(series_lp)
    # Reductions over an all-null column must yield NaN on both sides.
    assert math.isnan(reference)
    assert math.isnan(result)
| [
"math.isnan",
"legate.pandas.Series",
"math.fabs",
"numpy.random.randn"
] | [((1258, 1270), 'legate.pandas.Series', 'lp.Series', (['s'], {}), '(s)\n', (1267, 1270), True, 'from legate import pandas as lp\n'), ((955, 967), 'legate.pandas.Series', 'lp.Series', (['s'], {}), '(s)\n', (964, 967), True, 'from legate import pandas as lp\n'), ((1474, 1492), 'math.isnan', 'math.isnan', (['out_pd'], {}), '(out_pd)\n', (1484, 1492), False, 'import math\n'), ((1504, 1522), 'math.isnan', 'math.isnan', (['out_lp'], {}), '(out_lp)\n', (1514, 1522), False, 'import math\n'), ((1201, 1213), 'numpy.random.randn', 'randn', (['(1)', '(10)'], {}), '(1, 10)\n', (1206, 1213), False, 'from numpy.random import randn\n'), ((855, 866), 'numpy.random.randn', 'randn', (['(1)', 'n'], {}), '(1, n)\n', (860, 866), False, 'from numpy.random import randn\n'), ((1150, 1176), 'math.fabs', 'math.fabs', (['(out_pd - out_lp)'], {}), '(out_pd - out_lp)\n', (1159, 1176), False, 'import math\n')] |
import numpy as np
from random import seed

# Fix both the stdlib and the NumPy global RNG state so that any
# downstream sampling in this run is reproducible.
RNG_SEED = 42
seed(RNG_SEED)
np.random.seed(RNG_SEED)
| [
"numpy.random.seed",
"random.seed"
] | [((44, 52), 'random.seed', 'seed', (['(42)'], {}), '(42)\n', (48, 52), False, 'from random import seed\n'), ((53, 71), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (67, 71), True, 'import numpy as np\n')] |
import numpy as np
from rlbench import tasks
from rlbench.environment import SUPPORTED_ROBOTS
from rlbench.observation_config import ObservationConfig
from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.environment import Environment as RLEnvironment
from rlbench.task_environment import _DT, Quaternion
from core.common import StepDict, SampleBatch
from core.environment.environment import Environment
from core.utilities.convenience import all_class_names, get_named_class
from .stdout_footpad import suppress_stdout
import random as rnd
class FakeRLBenchEnv(Environment):
    """Adapter exposing an RLBench task through the project's ``Environment``
    API (init/reset/step/finalize), plus scripted demonstration recording
    via :meth:`live_demo`.

    Class attributes enumerate the accepted configuration names and map the
    human-readable action-mode strings onto RLBench ``ArmActionMode`` values.
    """
    ROBOT_NAME = SUPPORTED_ROBOTS.keys()
    OBSERVATION_MODE = ("state", "vision", "all")
    ACTION_MODE = {"joint velocity": ArmActionMode.ABS_JOINT_VELOCITY,
                   "delta joint velocity": ArmActionMode.DELTA_JOINT_VELOCITY,
                   "joint position": ArmActionMode.ABS_JOINT_POSITION,
                   "delta joint position": ArmActionMode.DELTA_JOINT_POSITION,
                   "joint torque": ArmActionMode.ABS_JOINT_TORQUE,
                   "delta joint torque": ArmActionMode.DELTA_JOINT_TORQUE,
                   "effector velocity": ArmActionMode.ABS_EE_VELOCITY,
                   "delta effector velocity": ArmActionMode.DELTA_EE_VELOCITY,
                   "effector position": ArmActionMode.ABS_EE_POSE,
                   "delta effector position": ArmActionMode.DELTA_EE_POSE}
    def __init__(self, task_name: str, observation_mode: str = "state",
                 action_mode: str = "delta joint position", robot_name: str = "panda"):
        """Validate all configuration names and pre-build the observation and
        action configs.  The simulator itself is NOT launched here — call
        :meth:`init` for that.

        :raises KeyError: if any of the supplied names is not recognised.
        """
        super(FakeRLBenchEnv, self).__init__(task_name)
        if task_name not in all_class_names(tasks):
            raise KeyError(f"Error: unknown task name {task_name}")
        if observation_mode not in FakeRLBenchEnv.OBSERVATION_MODE:
            raise KeyError(f"Error: unknown observation mode {observation_mode}, available: {FakeRLBenchEnv.OBSERVATION_MODE}")
        if action_mode not in FakeRLBenchEnv.ACTION_MODE:
            raise KeyError(f"Error: unknown action mode {action_mode}, available: {FakeRLBenchEnv.ACTION_MODE.keys()}")
        if robot_name not in FakeRLBenchEnv.ROBOT_NAME:
            raise KeyError(f"Error: unknown robot name {robot_name}, available: {FakeRLBenchEnv.ROBOT_NAME}")
        # TODO: modify the task/robot/arm/gripper to support early instantiation before v-rep launched
        self._observation_mode = observation_mode
        self._action_mode = action_mode
        self._task_name = task_name
        self._robot_name = robot_name
        self._observation_config = ObservationConfig()
        if self._observation_mode == "state":
            self._observation_config.set_all_low_dim(True)
            self._observation_config.set_all_high_dim(False)
        elif self._observation_mode == "vision":
            self._observation_config.set_all_low_dim(False)
            self._observation_config.set_all_high_dim(True)
        elif self._observation_mode == "all":
            self._observation_config.set_all(True)
        self._action_config = ActionMode(FakeRLBenchEnv.ACTION_MODE[self._action_mode])
        self.env = None
        self.task = None
        self._update_info_dict()
    def init(self, display=False):
        """Launch the RLBench simulator (optionally with a GUI) and bind the
        configured task."""
        with suppress_stdout():
            self.env = RLEnvironment(action_mode=self._action_config, obs_config=self._observation_config,
                                     headless=not display, robot_configuration=self._robot_name)
            self.env.launch()
        self.task = self.env.get_task(get_named_class(self._task_name, tasks))
    def reset(self, random: bool = True) -> StepDict:
        """Reset the task; with ``random=False`` the scene layout is made
        deterministic (np seed 0 + static positions).  Returns the first
        step dict containing the task descriptions and observation."""
        if not random:
            np.random.seed(0)
        self.task._static_positions = not random
        descriptions, obs = self.task.reset()
        # Returns a list of descriptions and the first observation
        next_step = {"opt": descriptions}
        if self._observation_mode == "state" or self._observation_mode == "all":
            next_step['s'] = obs.get_low_dim_data()
        if self._observation_mode == "vision" or self._observation_mode == "all":
            next_step["left shoulder rgb"] = obs.left_shoulder_rgb
            next_step["right_shoulder_rgb"] = obs.right_shoulder_rgb
            next_step["wrist_rgb"] = obs.wrist_rgb
        return next_step
    def step(self, last_step: StepDict) -> (StepDict, bool):
        """Execute ``last_step['a']`` in the simulator; fills ``last_step``
        with the reward and returns ``(last_step, next_step, terminate)``."""
        assert 'a' in last_step, "Key 'a' for action not in last_step, maybe you passed a wrong dict ?"
        obs, reward, terminate = self.task.step(last_step['a'])
        last_step['r'] = reward
        last_step["info"] = {}
        next_step = {"opt": None}
        if self._observation_mode == "state" or self._observation_mode == "all":
            next_step['s'] = obs.get_low_dim_data()
        if self._observation_mode == "vision" or self._observation_mode == "all":
            next_step["left shoulder rgb"] = obs.left_shoulder_rgb
            next_step["right_shoulder_rgb"] = obs.right_shoulder_rgb
            next_step["wrist_rgb"] = obs.wrist_rgb
        return last_step, next_step, terminate
    def finalize(self) -> bool:
        """Shut the simulator down and drop the task/env handles."""
        with suppress_stdout():
            self.env.shutdown()
        self.task = None
        self.env = None
        return True
    def name(self) -> str:
        """Return the configured task name."""
        return self._task_name
    # ------------- private methods ------------- #
    def _update_info_dict(self):
        """Populate ``self._info`` with action/observation dimensions and
        bounds.  For state observations this briefly launches a headless
        simulator just to measure the low-dim state size."""
        # update info dict
        self._info["action mode"] = self._action_mode
        self._info["observation mode"] = self._observation_mode
        # TODO: action dim should related to robot, not action mode, here we fixed it temporally
        self._info["action dim"] = (self._action_config.action_size,)
        self._info["action low"] = -np.ones(self._info["action dim"], dtype=np.float32)
        self._info["action high"] = np.ones(self._info["action dim"], dtype=np.float32)
        if self._observation_mode == "state" or self._observation_mode == "all":
            # TODO: observation should be determined without init the entire environment
            with suppress_stdout():
                env = RLEnvironment(action_mode=self._action_config, obs_config=self._observation_config,
                                    headless=True, robot_configuration=self._robot_name)
                env.launch()
                task = env.get_task(get_named_class(self._task_name, tasks))
                _, obs = task.reset()
                env.shutdown()
            del task
            del env
            self._info["time step"] = _DT
            self._info["state dim"] = tuple(obs.get_low_dim_data().shape)
            self._info["state low"] = np.ones(self._info["state dim"], dtype=np.float32) * -np.inf
            self._info["state high"] = np.ones(self._info["state dim"], dtype=np.float32) * np.inf
        if self._observation_mode == "vision" or self._observation_mode == "all":
            self._info["left shoulder rgb dim"] = tuple(self._observation_config.left_shoulder_camera.image_size) + (3,)
            self._info["left shoulder rgb low"] = np.zeros(self._info["left shoulder rgb dim"], dtype=np.float32)
            self._info["left shoulder rgb high"] = np.ones(self._info["left shoulder rgb dim"], dtype=np.float32)
            self._info["right shoulder rgb dim"] = tuple(self._observation_config.right_shoulder_camera.image_size) + (3,)
            self._info["right shoulder rgb low"] = np.zeros(self._info["right shoulder rgb dim"], dtype=np.float32)
            self._info["right shoulder rgb high"] = np.ones(self._info["right shoulder rgb dim"], dtype=np.float32)
            self._info["wrist rgb dim"] = tuple(self._observation_config.wrist_camera.image_size) + (3,)
            self._info["wrist rgb low"] = np.zeros(self._info["wrist rgb dim"], dtype=np.float32)
            self._info["wrist rgb high"] = np.ones(self._info["wrist rgb dim"], dtype=np.float32)
        self._info["reward low"] = -np.inf
        self._info["reward high"] = np.inf
    def live_demo(self, amount: int, random: bool = True) -> SampleBatch:
        """
        :param amount: number of demonstration trajectories to be generated
        :param random: if the starting position is random
        :return: observation list : [amount x [(steps-1) x [s, a] + [s_term, None]]],
            WARNING: that the action here is calculated from observation, when executing, they may cause some inaccuracy
        """
        seeds = [rnd.randint(0, 4096) if random else 0 for _ in range(amount)]
        self.task._static_positions = not random
        demo_pack = []
        for seed in seeds:
            # Seed twice with the same value so the recorded demo and the
            # replayed reset see an identical scene.
            np.random.seed(seed)
            pack = self.task.get_demos(1, True)[0]
            demo_traj = []
            np.random.seed(seed)
            desc, obs = self.task.reset()
            v_tar = 0.
            for o_tar in pack[1:]:
                # Derive the action that moves the current observation to the
                # next demo observation, per the configured arm action mode.
                action = []
                if self._action_config.arm == ArmActionMode.ABS_JOINT_VELOCITY:
                    action.extend((o_tar.joint_positions - obs.joint_positions) / _DT)
                elif self._action_config.arm == ArmActionMode.ABS_JOINT_POSITION:
                    action.extend(o_tar.joint_positions)
                elif self._action_config.arm == ArmActionMode.ABS_JOINT_TORQUE:
                    action.extend(o_tar.joint_forces)
                    raise TypeError("Warning, abs_joint_torque is not currently supported")
                elif self._action_config.arm == ArmActionMode.ABS_EE_POSE:
                    action.extend(o_tar.gripper_pose)
                elif self._action_config.arm == ArmActionMode.ABS_EE_VELOCITY:
                    # WARNING: This calculating method is not so accurate since rotation cannot be directed 'add' together
                    # since the original RLBench decides to do so, we should follow it
                    action.extend((o_tar.gripper_pose - obs.gripper_pose) / _DT)
                elif self._action_config.arm == ArmActionMode.DELTA_JOINT_VELOCITY:
                    v_tar = (o_tar.joint_positions - obs.joint_positions) / _DT
                    action.extend(v_tar - obs.joint_velocities)
                    raise TypeError("Warning, delta_joint_velocity is not currently supported")
                elif self._action_config.arm == ArmActionMode.DELTA_JOINT_POSITION:
                    action.extend(o_tar.joint_positions - obs.joint_positions)
                elif self._action_config.arm == ArmActionMode.DELTA_JOINT_TORQUE:
                    action.extend(o_tar.joint_forces - obs.joint_forces)
                    raise TypeError("Warning, delta_joint_torque is not currently supported")
                elif self._action_config.arm == ArmActionMode.DELTA_EE_POSE:
                    action.extend(o_tar.gripper_pose[:3] - obs.gripper_pose[:3])
                    q = Quaternion(o_tar.gripper_pose[3:7]) * Quaternion(obs.gripper_pose[3:7]).conjugate
                    action.extend(list(q))
                elif self._action_config.arm == ArmActionMode.DELTA_EE_VELOCITY:
                    # WARNING: This calculating method is not so accurate since rotation cannot be directed 'add' together
                    # since the original RLBench decides to do so, we should follow it
                    v_tar_new = (o_tar.gripper_pose - obs.gripper_pose) / _DT
                    action.extend(v_tar_new - v_tar)
                    v_tar = v_tar_new
                    raise TypeError("Warning, delta_ee_velocity is not currently supported")
                action.append(1.0 if o_tar.gripper_open > 0.9 else 0.0)
                action = np.asarray(action, dtype=np.float32)
                demo_traj.append({'observation': obs,
                                  'a': action,
                                  's': obs.get_low_dim_data()})
                obs, reward, done = self.task.step(action)
                demo_traj[-1]['r'] = reward
            demo_pack.append(demo_traj)
        return {"trajectory": demo_pack,
                "config": "default",
                "policy": "hand-coding",
                "env class": self.__class__.__name__,
                "env name": self._task_name,
                "env config": "default",
                "observation config": self._observation_mode,
                "robot config": self._robot_name,
                "action config": self._action_mode}
if __name__ == "__main__":
    import bz2
    import pickle
    from time import sleep
    # normal rl environment test
    # Smoke-test the env API with random actions sized from the reported
    # action dimension.
    e = FakeRLBenchEnv("CloseMicrowave")
    e.init(display=False)
    e.reset()
    for i in range(10):
        e.step({'a': np.random.randn(*e.info()["action dim"])})
        sleep(0.1)
    e.finalize()
    # generate demonstrations
    env = FakeRLBenchEnv("PutUmbrellaInUmbrellaStand")
    env.init(display=True)
    pack = env.live_demo(10, False)
    # Persist the demo pack as a bz2-compressed pickle.
    with bz2.BZ2File("demo_10x_PutUmbrellaInUmbrellaStand.pkl", "wb") as f:
        pickle.dump(pack, f)
    env.finalize()
| [
"rlbench.task_environment.Quaternion",
"rlbench.observation_config.ObservationConfig",
"pickle.dump",
"rlbench.environment.Environment",
"numpy.random.seed",
"core.utilities.convenience.get_named_class",
"random.randint",
"numpy.asarray",
"numpy.zeros",
"rlbench.environment.SUPPORTED_ROBOTS.keys",... | [((615, 638), 'rlbench.environment.SUPPORTED_ROBOTS.keys', 'SUPPORTED_ROBOTS.keys', ([], {}), '()\n', (636, 638), False, 'from rlbench.environment import SUPPORTED_ROBOTS\n'), ((2604, 2623), 'rlbench.observation_config.ObservationConfig', 'ObservationConfig', ([], {}), '()\n', (2621, 2623), False, 'from rlbench.observation_config import ObservationConfig\n'), ((3087, 3144), 'rlbench.action_modes.ActionMode', 'ActionMode', (['FakeRLBenchEnv.ACTION_MODE[self._action_mode]'], {}), '(FakeRLBenchEnv.ACTION_MODE[self._action_mode])\n', (3097, 3144), False, 'from rlbench.action_modes import ArmActionMode, ActionMode\n'), ((5880, 5931), 'numpy.ones', 'np.ones', (["self._info['action dim']"], {'dtype': 'np.float32'}), "(self._info['action dim'], dtype=np.float32)\n", (5887, 5931), True, 'import numpy as np\n'), ((12758, 12768), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (12763, 12768), False, 'from time import sleep\n'), ((12945, 13005), 'bz2.BZ2File', 'bz2.BZ2File', (['"""demo_10x_PutUmbrellaInUmbrellaStand.pkl"""', '"""wb"""'], {}), "('demo_10x_PutUmbrellaInUmbrellaStand.pkl', 'wb')\n", (12956, 13005), False, 'import bz2\n'), ((13020, 13040), 'pickle.dump', 'pickle.dump', (['pack', 'f'], {}), '(pack, f)\n', (13031, 13040), False, 'import pickle\n'), ((1668, 1690), 'core.utilities.convenience.all_class_names', 'all_class_names', (['tasks'], {}), '(tasks)\n', (1683, 1690), False, 'from core.utilities.convenience import all_class_names, get_named_class\n'), ((3320, 3473), 'rlbench.environment.Environment', 'RLEnvironment', ([], {'action_mode': 'self._action_config', 'obs_config': 'self._observation_config', 'headless': '(not display)', 'robot_configuration': 'self._robot_name'}), '(action_mode=self._action_config, obs_config=self.\n _observation_config, headless=not display, robot_configuration=self.\n _robot_name)\n', (3333, 3473), True, 'from rlbench.environment import Environment as RLEnvironment\n'), ((3704, 
3721), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3718, 3721), True, 'import numpy as np\n'), ((5792, 5843), 'numpy.ones', 'np.ones', (["self._info['action dim']"], {'dtype': 'np.float32'}), "(self._info['action dim'], dtype=np.float32)\n", (5799, 5843), True, 'import numpy as np\n'), ((7124, 7187), 'numpy.zeros', 'np.zeros', (["self._info['left shoulder rgb dim']"], {'dtype': 'np.float32'}), "(self._info['left shoulder rgb dim'], dtype=np.float32)\n", (7132, 7187), True, 'import numpy as np\n'), ((7239, 7301), 'numpy.ones', 'np.ones', (["self._info['left shoulder rgb dim']"], {'dtype': 'np.float32'}), "(self._info['left shoulder rgb dim'], dtype=np.float32)\n", (7246, 7301), True, 'import numpy as np\n'), ((7478, 7543), 'numpy.zeros', 'np.zeros', (["self._info['right shoulder rgb dim']"], {'dtype': 'np.float32'}), "(self._info['right shoulder rgb dim'], dtype=np.float32)\n", (7486, 7543), True, 'import numpy as np\n'), ((7597, 7661), 'numpy.ones', 'np.ones', (["self._info['right shoulder rgb dim']"], {'dtype': 'np.float32'}), "(self._info['right shoulder rgb dim'], dtype=np.float32)\n", (7604, 7661), True, 'import numpy as np\n'), ((7809, 7864), 'numpy.zeros', 'np.zeros', (["self._info['wrist rgb dim']"], {'dtype': 'np.float32'}), "(self._info['wrist rgb dim'], dtype=np.float32)\n", (7817, 7864), True, 'import numpy as np\n'), ((7908, 7962), 'numpy.ones', 'np.ones', (["self._info['wrist rgb dim']"], {'dtype': 'np.float32'}), "(self._info['wrist rgb dim'], dtype=np.float32)\n", (7915, 7962), True, 'import numpy as np\n'), ((8685, 8705), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8699, 8705), True, 'import numpy as np\n'), ((8797, 8817), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8811, 8817), True, 'import numpy as np\n'), ((3573, 3612), 'core.utilities.convenience.get_named_class', 'get_named_class', (['self._task_name', 'tasks'], {}), '(self._task_name, tasks)\n', (3588, 3612), False, 'from 
core.utilities.convenience import all_class_names, get_named_class\n'), ((6160, 6301), 'rlbench.environment.Environment', 'RLEnvironment', ([], {'action_mode': 'self._action_config', 'obs_config': 'self._observation_config', 'headless': '(True)', 'robot_configuration': 'self._robot_name'}), '(action_mode=self._action_config, obs_config=self.\n _observation_config, headless=True, robot_configuration=self._robot_name)\n', (6173, 6301), True, 'from rlbench.environment import Environment as RLEnvironment\n'), ((6711, 6761), 'numpy.ones', 'np.ones', (["self._info['state dim']"], {'dtype': 'np.float32'}), "(self._info['state dim'], dtype=np.float32)\n", (6718, 6761), True, 'import numpy as np\n'), ((6811, 6861), 'numpy.ones', 'np.ones', (["self._info['state dim']"], {'dtype': 'np.float32'}), "(self._info['state dim'], dtype=np.float32)\n", (6818, 6861), True, 'import numpy as np\n'), ((8511, 8531), 'random.randint', 'rnd.randint', (['(0)', '(4096)'], {}), '(0, 4096)\n', (8522, 8531), True, 'import random as rnd\n'), ((11689, 11725), 'numpy.asarray', 'np.asarray', (['action'], {'dtype': 'np.float32'}), '(action, dtype=np.float32)\n', (11699, 11725), True, 'import numpy as np\n'), ((6398, 6437), 'core.utilities.convenience.get_named_class', 'get_named_class', (['self._task_name', 'tasks'], {}), '(self._task_name, tasks)\n', (6413, 6437), False, 'from core.utilities.convenience import all_class_names, get_named_class\n'), ((10904, 10939), 'rlbench.task_environment.Quaternion', 'Quaternion', (['o_tar.gripper_pose[3:7]'], {}), '(o_tar.gripper_pose[3:7])\n', (10914, 10939), False, 'from rlbench.task_environment import _DT, Quaternion\n'), ((10942, 10975), 'rlbench.task_environment.Quaternion', 'Quaternion', (['obs.gripper_pose[3:7]'], {}), '(obs.gripper_pose[3:7])\n', (10952, 10975), False, 'from rlbench.task_environment import _DT, Quaternion\n')] |
import copy
import warnings
import numpy as np
from ... import AudioSignal, play_utils
class SeparationBase(object):
"""Base class for all separation algorithms in nussl.
Do not call this. It will not do anything.
Parameters:
input_audio_signal (AudioSignal). AudioSignal` object.
This will always be a copy of the provided AudioSignal object.
"""
def __init__(self, input_audio_signal):
self.metadata = {}
self._audio_signal = None
self.audio_signal = input_audio_signal
@property
def sample_rate(self):
"""
(int): Sample rate of :attr:`audio_signal`.
Literally :attr:`audio_signal.sample_rate`.
"""
return self.audio_signal.sample_rate
@property
def stft_params(self):
"""
STFTParams object containing the STFT parameters of the copied AudioSignal.
"""
return self.audio_signal.stft_params
@property
def audio_signal(self):
"""
Copy of AudioSignal that is made on initialization.
"""
return self._audio_signal
def _preprocess_audio_signal(self):
"""
This function should be implemented by the subclass. It can do things like
take the STFT of the audio signal, or resample it to a desired sample rate,
build the input data for a deep model, etc. Here, it does nothing.
"""
pass
@audio_signal.setter
def audio_signal(self, input_audio_signal):
"""
When setting the AudioSignal object for a separation algorithm (which
can happen on initialization or later one), it is copied on set so
as to not alter the data within the original audio signal. If the
AudioSignal object has data, then it the function `_preprocess_audio_signal`
is run, which is implemented by the subclass.
Args:
input_audio_signal ([type]): [description]
"""
if not isinstance(input_audio_signal, AudioSignal):
raise ValueError('input_audio_signal is not an AudioSignal object!')
self._audio_signal = copy.deepcopy(input_audio_signal)
if self.audio_signal is not None:
if not self.audio_signal.has_data:
warnings.warn('input_audio_signal has no data!')
# initialize to empty arrays so that we don't crash randomly
self.audio_signal.audio_data = np.array([])
self.audio_signal.stft_data = np.array([[]])
else:
self._preprocess_audio_signal()
def interact(self, add_residual=False, source='upload', label=None,
ext='.wav', separate_fn=None, outputs="html",
inline=None, inbrowser=None, share=False, debug=False, auth=None,
**kwargs):
"""
Uses gradio to create a small interactive interface
for the separation algorithm. Fair warning, there
may be some race conditions with this...
When you call this from a notebook, the interface will be displayed
below the cell. When you call this from a regular Python script, you'll see a
link print out (a localhost link and a gradio link if you
called this with sharing on). The sessions will last for the duration
of the notebook or the script.
To use this functionality, you must install gradio: `pip install gradio`.
Args:
add_residual: Whether or not to add the residual signal.
source: Either "upload" (upload a file to separate), or "microphone", record.
label (str): Label of interface.
ext (str): Extension for audio file returned.
separate_fn (function): Function that takes in a file object and then returns a matching
element for audio_out.
outputs (str): Defaults to "html", the type of output interface for Gradio to display.
inline (bool): whether to display in the interface inline on python notebooks.
inbrowser (bool): whether to automatically launch the interface in a new tab on the default browser.
share (bool): whether to create a publicly shareable link from your computer for the interface.
debug (bool): if True, and the interface was launched from Google Colab, prints the errors in the cell output.
auth (Tuple[str, str]): If provided, username and password required to access interface.
kwargs: Keyword arguments to gradio.Interface.
Example:
>>> import nussl
>>> nussl.separation.primitive.HPSS(
>>> nussl.AudioSignal()).interact()
"""
try:
import gradio
except: # pragma: no cover
raise ImportError(
"To use this functionality, you must install gradio: "
"pip install gradio.")
def _separate(file_obj): # pragma: no cover
mix = AudioSignal(file_obj.name)
self.audio_signal = mix
estimates = self()
if add_residual:
estimates.append(mix - estimates[0])
estimates = {f'Estimate {i}': s for i, s in enumerate(estimates)}
html = play_utils.multitrack(estimates, ext=ext, display=False)
return html
if label is None: label = f"Separation via {type(self).__name__}"
audio_in = gradio.inputs.Audio(source=source, type="file", label=label)
if separate_fn is None:
separate_fn = _separate
gradio.Interface(
fn=separate_fn,
inputs=audio_in,
outputs=outputs,
**kwargs
).launch(
inline=inline,
inbrowser=inbrowser,
debug=debug,
auth=auth,
share=share
)
def run(self, *args, audio_signal=None, **kwargs):
"""
Runs separation algorithm.
Raises:
NotImplementedError: Cannot call base class
"""
raise NotImplementedError('Cannot call base class.')
def make_audio_signals(self):
"""
Makes :class:`audio_signal.AudioSignal` objects after separation algorithm is run
Raises:
NotImplementedError: Cannot call base class
"""
raise NotImplementedError('Cannot call base class.')
def get_metadata(self, to_str=False, **kwargs):
"""
Returns metadata associated with this separation algorithm.
Args:
to_str (bool): Whether to return the metadata as a string.
Returns:
Formatted metadata if `to_str` is True, else metadata dict.
Raises:
NotImplementedError: Cannot call base class
"""
raise NotImplementedError('Cannot call base class.')
def __call__(self, *args, audio_signal=None, **kwargs):
if audio_signal is not None:
self.audio_signal = audio_signal
self.run(*args, **kwargs)
return self.make_audio_signals()
def __repr__(self):
return f"{self.__class__.__name__} on {str(self.audio_signal)}"
def __eq__(self, other):
for k, v in list(self.__dict__.items()):
if isinstance(v, np.ndarray):
if not np.array_equal(v, other.__dict__[k]):
return False
elif v != other.__dict__[k]:
return False
return True
def __ne__(self, other):
return not self == other
class SeparationException(Exception):
pass
| [
"copy.deepcopy",
"gradio.Interface",
"numpy.array",
"numpy.array_equal",
"warnings.warn",
"gradio.inputs.Audio"
] | [((2148, 2181), 'copy.deepcopy', 'copy.deepcopy', (['input_audio_signal'], {}), '(input_audio_signal)\n', (2161, 2181), False, 'import copy\n'), ((5513, 5573), 'gradio.inputs.Audio', 'gradio.inputs.Audio', ([], {'source': 'source', 'type': '"""file"""', 'label': 'label'}), "(source=source, type='file', label=label)\n", (5532, 5573), False, 'import gradio\n'), ((2288, 2336), 'warnings.warn', 'warnings.warn', (['"""input_audio_signal has no data!"""'], {}), "('input_audio_signal has no data!')\n", (2301, 2336), False, 'import warnings\n'), ((2462, 2474), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2470, 2474), True, 'import numpy as np\n'), ((2521, 2535), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (2529, 2535), True, 'import numpy as np\n'), ((5651, 5727), 'gradio.Interface', 'gradio.Interface', ([], {'fn': 'separate_fn', 'inputs': 'audio_in', 'outputs': 'outputs'}), '(fn=separate_fn, inputs=audio_in, outputs=outputs, **kwargs)\n', (5667, 5727), False, 'import gradio\n'), ((7393, 7429), 'numpy.array_equal', 'np.array_equal', (['v', 'other.__dict__[k]'], {}), '(v, other.__dict__[k])\n', (7407, 7429), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
"""
Unit tests for evaluation operations, each operation is tested for
the forward and the backward pass
"""
from __future__ import division
import numpy as np
import pytest
import cntk as C
from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE,\
unittest_helper
TARGET_OUT_PAIRS = [
# (target_vector, output_vector)
([[0., 0., 0., 1]], [[1., 2., 3., 4.]]),
([[0., 0., 0.5, 0.5]], [[1., 2., 3., 4.]]),
([[0., 0.4, 0.3, 0.3]], [[2., 1., 1., 4.]])
]
@pytest.mark.parametrize("target_vector, output_vector", TARGET_OUT_PAIRS)
def test_op_cross_entropy_with_soft_max(output_vector, target_vector, device_id, precision):
dt = PRECISION_TO_TYPE[precision]
o = AA(output_vector, dtype=dt)
t = AA(target_vector, dtype=dt)
ox = o - o.max() # subtract max to avoid overflow
exp_x = np.exp(ox)
s_max = exp_x / np.sum(exp_x) # softmax function
expected_forward = np.asarray(-np.sum(t * np.log(s_max, dtype=dt), dtype=dt))
expected_forward.shape = (1,1,1) + expected_forward.shape
s = np.sum(t, dtype=dt)
backward = np.subtract(s_max * s, t)
backward.shape = (1,) + backward.shape
expected_backward = {
'left_arg': backward,
'right_arg': [-1*o]
}
from cntk.losses import cross_entropy_with_softmax
_test_binary_op(precision, device_id, cross_entropy_with_softmax,
output_vector, target_vector,
expected_forward, expected_backward)
TARGET_OUT_PAIRS_WITH_AXIS = [
# (target_vector, output_vector, axis)
([[0., 0., 0., 1]],
[[1., 2., 3., 4.]], -1),
([[0., 0., 0.5, 0.5]],
[[1., 2., 3., 4.]], 1),
([[0., 0.4, 0.3, 0.3]],
[[2., 1., 1., 4.]], 1),
([[0., 0., 0., 1],
[0., 0., 1., 0.]],
[[1., 2., 3., 4.],
[1., 2., 3., 5.]], 1),
([[0., 0., 0., 1],
[0., 1., 0., 0.]],
[[1., 2., 3., 4.],
[1., 7., 3., 5.]], 1)
]
@pytest.mark.parametrize("target_vector, output_vector, axis", TARGET_OUT_PAIRS_WITH_AXIS)
def test_op_cross_entropy_with_soft_max_and_axis(output_vector, target_vector, axis, device_id, precision):
dt = PRECISION_TO_TYPE[precision]
x = AA(output_vector, dtype=dt)
t = AA(target_vector, dtype=dt)
expected_forward = []
expected_backward_left = []
expected_backward_right = []
for sample, target in zip(x, t):
ox = sample - sample.max() # subtract max to avoid overflow
exp_x = np.exp(ox)
s_max = exp_x / np.sum(exp_x) # softmax function
forward = np.asarray(-np.sum(target * np.log(s_max, dtype=dt), dtype=dt))
expected_forward.append(forward.tolist())
s = np.sum(target, dtype=dt)
backward = np.subtract(s_max * s, target)
expected_backward_left.append(backward.tolist())
expected_backward_right.append(-1*sample)
expected_forward = [np.reshape(AA(expected_forward, dtype=dt), (x.shape[0], 1))]
expected_backward_left = AA(expected_backward_left, dtype=dt)
expected_backward = {
'left_arg': [expected_backward_left],
'right_arg': [expected_backward_right]
}
from cntk.losses import cross_entropy_with_softmax
_test_binary_op(precision, device_id, cross_entropy_with_softmax,
output_vector, target_vector,
expected_forward, expected_backward, op_param_dict={'axis': axis})
@pytest.mark.parametrize("target_vector, output_vector", TARGET_OUT_PAIRS)
def test_op_squared_error(output_vector, target_vector, device_id, precision):
dt = PRECISION_TO_TYPE[precision]
o = AA(output_vector, dtype=dt)
t = AA(target_vector, dtype=dt)
expected_forward = AA([np.sum((t - o)**2)])
backward = 2 * np.subtract(o, t)
expected_backward = {
'left_arg': [backward],
'right_arg': [-1*backward]
}
from cntk.losses import squared_error
_test_binary_op(precision, device_id, squared_error,
output_vector, target_vector,
expected_forward, expected_backward)
TARGET_OUT_PAIRS_CLASSIFICATION = [
# (target_vector, output_vector)
([[1., 0., 0., 0]], [[1., 2., 3., 4.]]),
([[0., 0., 0., 1]], [[1., 2., 3., 4.]]),
]
LAMBDA_RANK_GRADIENTS_VALUES_AND_INPUTS = [
# (grad, value, output, gain)
([[-0.2121461], [ 0.2121461]], 58.038055419921875, [1, 2], [7, 1]),
([[-0.14861868], [ 0.14861868]], 40.65847396850586, [3, 4], [3, 1])
]
@pytest.mark.parametrize("grad, value, output, gain", LAMBDA_RANK_GRADIENTS_VALUES_AND_INPUTS)
def test_lambda_rank(grad, value, output, gain, device_id, precision):
dt = PRECISION_TO_TYPE[precision]
score = AA(output, dtype=dt).reshape(-1,1,1)
gain = AA(gain, dtype=dt).reshape(-1,1,1)
group = np.ones_like(score).reshape(-1,1,1)
expected_value = AA(value, dtype=dt)
expected_grad = AA(grad, dtype=dt)
from cntk.losses import lambda_rank
g = C.input_variable((1,))
s = C.input_variable((1,), needs_gradient=True)
n = C.input_variable((1,))
f = lambda_rank(s, n, g)
actual_grad, actual_value = f.grad({s:score, n:gain, g:group}, [s], [f.output])
assert np.allclose(actual_value, expected_value)
assert np.allclose(actual_grad, expected_grad)
NCE_EXPECTED_VALUES = [
# (classes, xdim, batch, expected_value)
(100, 50, 2, [ 3.52544 , 5.671973]),
(1000, 100, 4, [ 1.949046, 2.219169, 2.426618, 3.094275]),
(10000, 200, 6, [ 1.494069, 1.569222, 1.628346, 1.64969 , 1.673538, 1.755621]),
]
@pytest.mark.parametrize("classes, xdim, batch, expected_value", NCE_EXPECTED_VALUES)
def test_nce_loss(classes, xdim, batch, expected_value, device_id, precision):
dt = PRECISION_TO_TYPE[precision]
from cntk.losses import nce_loss
import scipy
x = C.input_variable(xdim, needs_gradient=True)
y = C.input_variable(classes, is_sparse=True)
x0 = np.arange(batch * xdim, dtype=dt).reshape((batch, xdim))/(batch * xdim)
data = np.ones(batch, dtype=dt)
indices = list(range(10,10*batch+1,10))
indptr = list(range(batch+1))
y0 = scipy.sparse.csr_matrix((data, indices, indptr), shape=(batch, classes))
q = np.arange(classes, dtype=dt) + 1
b = C.parameter((classes, 1), init=-np.log(classes))
W = C.parameter((classes, C.InferredDimension), init=C.glorot_uniform(seed=98052))
loss = C.nce_loss(W, b, x, y, q, seed=98052)
v = loss.grad({x:x0, y:y0}, wrt=loss.parameters, as_numpy=False)
for key in v:
assert v[key].is_sparse, "gradient of nce_loss with respect to %s is not sparse"%key
losses = np.zeros((100,batch))
for i in range(100):
losses[i,:] = loss.eval({x:x0, y:y0})
assert np.allclose(np.mean(losses, axis=0), AA(expected_value))
@pytest.mark.parametrize("classes, xdim, batch, expected_value", NCE_EXPECTED_VALUES)
def test_nce_backward_indices(classes, xdim, batch, expected_value, device_id, precision):
"""
Simple test that makes sure that the derivatives have the correct sparsity pattern
"""
# ignore precision, only sparsity pattern matters for this test
dt = np.float32
from cntk.losses import nce_loss
import scipy
trials = 10
# Establish baseline
expected_count = np.zeros(classes)
I = C.constant(np.eye(classes, dtype=dt))
q = np.arange(classes, dtype=dt) + 1
z = C.reduce_sum(C.times(C.random_sample(q, 32, True, seed=98052), I), axis=0)
for i in range(trials):
expected_count[np.nonzero(z.eval().ravel())] += 1
# Set things up to measure the same thing with nce_loss
x = C.input_variable(xdim, needs_gradient=True)
y = C.input_variable(classes, is_sparse=True)
x0 = np.arange(batch * xdim, dtype=dt).reshape((batch, xdim))/(batch * xdim)
data = np.ones(batch, dtype=dt)
indices = list(range(10,10*batch+1,10))
indptr = list(range(batch+1))
y0 = scipy.sparse.csr_matrix((data, indices, indptr), shape=(batch, classes))
b = C.parameter((classes, 1))
W = C.parameter((classes, C.InferredDimension))
gb = np.zeros(classes)
vb = C.input_variable((classes, 1), dtype=dt)
Ib = C.constant(np.eye(1, dtype=dt))
zb = C.times(vb, Ib)
loss = C.nce_loss(W, b, x, y, q, seed=98052)
for i in range(trials):
v = loss.grad({x: x0, y: y0}, wrt=loss.parameters, as_numpy=False)
gb[np.nonzero(zb.eval({vb: v[b]}).ravel())] += 1
for i in range(classes):
assert gb[i] == expected_count[i] or (i in indices and gb[i] == trials) | [
"numpy.sum",
"cntk.nce_loss",
"numpy.allclose",
"numpy.ones",
"cntk.parameter",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"pytest.mark.parametrize",
"cntk.times",
"cntk.ops.tests.ops_test_utils.AA",
"numpy.ones_like",
"cntk.ops.tests.ops_test_utils._test_binary_op",
"cntk.random_sample",
... | [((751, 824), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_vector, output_vector"""', 'TARGET_OUT_PAIRS'], {}), "('target_vector, output_vector', TARGET_OUT_PAIRS)\n", (774, 824), False, 'import pytest\n'), ((2191, 2284), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_vector, output_vector, axis"""', 'TARGET_OUT_PAIRS_WITH_AXIS'], {}), "('target_vector, output_vector, axis',\n TARGET_OUT_PAIRS_WITH_AXIS)\n", (2214, 2284), False, 'import pytest\n'), ((3655, 3728), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_vector, output_vector"""', 'TARGET_OUT_PAIRS'], {}), "('target_vector, output_vector', TARGET_OUT_PAIRS)\n", (3678, 3728), False, 'import pytest\n'), ((4709, 4806), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""grad, value, output, gain"""', 'LAMBDA_RANK_GRADIENTS_VALUES_AND_INPUTS'], {}), "('grad, value, output, gain',\n LAMBDA_RANK_GRADIENTS_VALUES_AND_INPUTS)\n", (4732, 4806), False, 'import pytest\n'), ((5797, 5885), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""classes, xdim, batch, expected_value"""', 'NCE_EXPECTED_VALUES'], {}), "('classes, xdim, batch, expected_value',\n NCE_EXPECTED_VALUES)\n", (5820, 5885), False, 'import pytest\n'), ((7029, 7117), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""classes, xdim, batch, expected_value"""', 'NCE_EXPECTED_VALUES'], {}), "('classes, xdim, batch, expected_value',\n NCE_EXPECTED_VALUES)\n", (7052, 7117), False, 'import pytest\n'), ((965, 992), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['output_vector'], {'dtype': 'dt'}), '(output_vector, dtype=dt)\n', (967, 992), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((1001, 1028), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['target_vector'], {'dtype': 'dt'}), '(target_vector, dtype=dt)\n', (1003, 1028), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, 
PRECISION_TO_TYPE, unittest_helper\n'), ((1097, 1107), 'numpy.exp', 'np.exp', (['ox'], {}), '(ox)\n', (1103, 1107), True, 'import numpy as np\n'), ((1315, 1334), 'numpy.sum', 'np.sum', (['t'], {'dtype': 'dt'}), '(t, dtype=dt)\n', (1321, 1334), True, 'import numpy as np\n'), ((1350, 1375), 'numpy.subtract', 'np.subtract', (['(s_max * s)', 't'], {}), '(s_max * s, t)\n', (1361, 1375), True, 'import numpy as np\n'), ((1571, 1707), 'cntk.ops.tests.ops_test_utils._test_binary_op', '_test_binary_op', (['precision', 'device_id', 'cross_entropy_with_softmax', 'output_vector', 'target_vector', 'expected_forward', 'expected_backward'], {}), '(precision, device_id, cross_entropy_with_softmax,\n output_vector, target_vector, expected_forward, expected_backward)\n', (1586, 1707), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((2436, 2463), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['output_vector'], {'dtype': 'dt'}), '(output_vector, dtype=dt)\n', (2438, 2463), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((2472, 2499), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['target_vector'], {'dtype': 'dt'}), '(target_vector, dtype=dt)\n', (2474, 2499), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((3226, 3262), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['expected_backward_left'], {'dtype': 'dt'}), '(expected_backward_left, dtype=dt)\n', (3228, 3262), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((3450, 3620), 'cntk.ops.tests.ops_test_utils._test_binary_op', '_test_binary_op', (['precision', 'device_id', 'cross_entropy_with_softmax', 'output_vector', 'target_vector', 'expected_forward', 'expected_backward'], {'op_param_dict': "{'axis': axis}"}), "(precision, device_id, 
cross_entropy_with_softmax,\n output_vector, target_vector, expected_forward, expected_backward,\n op_param_dict={'axis': axis})\n", (3465, 3620), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((3855, 3882), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['output_vector'], {'dtype': 'dt'}), '(output_vector, dtype=dt)\n', (3857, 3882), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((3891, 3918), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['target_vector'], {'dtype': 'dt'}), '(target_vector, dtype=dt)\n', (3893, 3918), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((4153, 4276), 'cntk.ops.tests.ops_test_utils._test_binary_op', '_test_binary_op', (['precision', 'device_id', 'squared_error', 'output_vector', 'target_vector', 'expected_forward', 'expected_backward'], {}), '(precision, device_id, squared_error, output_vector,\n target_vector, expected_forward, expected_backward)\n', (4168, 4276), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((5079, 5098), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['value'], {'dtype': 'dt'}), '(value, dtype=dt)\n', (5081, 5098), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((5120, 5138), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['grad'], {'dtype': 'dt'}), '(grad, dtype=dt)\n', (5122, 5138), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((5189, 5211), 'cntk.input_variable', 'C.input_variable', (['(1,)'], {}), '((1,))\n', (5205, 5211), True, 'import cntk as C\n'), ((5220, 5263), 'cntk.input_variable', 'C.input_variable', (['(1,)'], {'needs_gradient': '(True)'}), '((1,), 
needs_gradient=True)\n', (5236, 5263), True, 'import cntk as C\n'), ((5272, 5294), 'cntk.input_variable', 'C.input_variable', (['(1,)'], {}), '((1,))\n', (5288, 5294), True, 'import cntk as C\n'), ((5303, 5323), 'cntk.losses.lambda_rank', 'lambda_rank', (['s', 'n', 'g'], {}), '(s, n, g)\n', (5314, 5323), False, 'from cntk.losses import lambda_rank\n'), ((5421, 5462), 'numpy.allclose', 'np.allclose', (['actual_value', 'expected_value'], {}), '(actual_value, expected_value)\n', (5432, 5462), True, 'import numpy as np\n'), ((5474, 5513), 'numpy.allclose', 'np.allclose', (['actual_grad', 'expected_grad'], {}), '(actual_grad, expected_grad)\n', (5485, 5513), True, 'import numpy as np\n'), ((6063, 6106), 'cntk.input_variable', 'C.input_variable', (['xdim'], {'needs_gradient': '(True)'}), '(xdim, needs_gradient=True)\n', (6079, 6106), True, 'import cntk as C\n'), ((6115, 6156), 'cntk.input_variable', 'C.input_variable', (['classes'], {'is_sparse': '(True)'}), '(classes, is_sparse=True)\n', (6131, 6156), True, 'import cntk as C\n'), ((6250, 6274), 'numpy.ones', 'np.ones', (['batch'], {'dtype': 'dt'}), '(batch, dtype=dt)\n', (6257, 6274), True, 'import numpy as np\n'), ((6362, 6434), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['(data, indices, indptr)'], {'shape': '(batch, classes)'}), '((data, indices, indptr), shape=(batch, classes))\n', (6385, 6434), False, 'import scipy\n'), ((6634, 6671), 'cntk.nce_loss', 'C.nce_loss', (['W', 'b', 'x', 'y', 'q'], {'seed': '(98052)'}), '(W, b, x, y, q, seed=98052)\n', (6644, 6671), True, 'import cntk as C\n'), ((6865, 6887), 'numpy.zeros', 'np.zeros', (['(100, batch)'], {}), '((100, batch))\n', (6873, 6887), True, 'import numpy as np\n'), ((7515, 7532), 'numpy.zeros', 'np.zeros', (['classes'], {}), '(classes)\n', (7523, 7532), True, 'import numpy as np\n'), ((7859, 7902), 'cntk.input_variable', 'C.input_variable', (['xdim'], {'needs_gradient': '(True)'}), '(xdim, needs_gradient=True)\n', (7875, 7902), True, 'import cntk as 
C\n'), ((7911, 7952), 'cntk.input_variable', 'C.input_variable', (['classes'], {'is_sparse': '(True)'}), '(classes, is_sparse=True)\n', (7927, 7952), True, 'import cntk as C\n'), ((8046, 8070), 'numpy.ones', 'np.ones', (['batch'], {'dtype': 'dt'}), '(batch, dtype=dt)\n', (8053, 8070), True, 'import numpy as np\n'), ((8158, 8230), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['(data, indices, indptr)'], {'shape': '(batch, classes)'}), '((data, indices, indptr), shape=(batch, classes))\n', (8181, 8230), False, 'import scipy\n'), ((8240, 8265), 'cntk.parameter', 'C.parameter', (['(classes, 1)'], {}), '((classes, 1))\n', (8251, 8265), True, 'import cntk as C\n'), ((8274, 8317), 'cntk.parameter', 'C.parameter', (['(classes, C.InferredDimension)'], {}), '((classes, C.InferredDimension))\n', (8285, 8317), True, 'import cntk as C\n'), ((8328, 8345), 'numpy.zeros', 'np.zeros', (['classes'], {}), '(classes)\n', (8336, 8345), True, 'import numpy as np\n'), ((8355, 8395), 'cntk.input_variable', 'C.input_variable', (['(classes, 1)'], {'dtype': 'dt'}), '((classes, 1), dtype=dt)\n', (8371, 8395), True, 'import cntk as C\n'), ((8446, 8461), 'cntk.times', 'C.times', (['vb', 'Ib'], {}), '(vb, Ib)\n', (8453, 8461), True, 'import cntk as C\n'), ((8474, 8511), 'cntk.nce_loss', 'C.nce_loss', (['W', 'b', 'x', 'y', 'q'], {'seed': '(98052)'}), '(W, b, x, y, q, seed=98052)\n', (8484, 8511), True, 'import cntk as C\n'), ((1128, 1141), 'numpy.sum', 'np.sum', (['exp_x'], {}), '(exp_x)\n', (1134, 1141), True, 'import numpy as np\n'), ((2715, 2725), 'numpy.exp', 'np.exp', (['ox'], {}), '(ox)\n', (2721, 2725), True, 'import numpy as np\n'), ((2928, 2952), 'numpy.sum', 'np.sum', (['target'], {'dtype': 'dt'}), '(target, dtype=dt)\n', (2934, 2952), True, 'import numpy as np\n'), ((2972, 3002), 'numpy.subtract', 'np.subtract', (['(s_max * s)', 'target'], {}), '(s_max * s, target)\n', (2983, 3002), True, 'import numpy as np\n'), ((3988, 4005), 'numpy.subtract', 'np.subtract', (['o', 't'], 
{}), '(o, t)\n', (3999, 4005), True, 'import numpy as np\n'), ((6444, 6472), 'numpy.arange', 'np.arange', (['classes'], {'dtype': 'dt'}), '(classes, dtype=dt)\n', (6453, 6472), True, 'import numpy as np\n'), ((6981, 7004), 'numpy.mean', 'np.mean', (['losses'], {'axis': '(0)'}), '(losses, axis=0)\n', (6988, 7004), True, 'import numpy as np\n'), ((7006, 7024), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['expected_value'], {}), '(expected_value)\n', (7008, 7024), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((7552, 7577), 'numpy.eye', 'np.eye', (['classes'], {'dtype': 'dt'}), '(classes, dtype=dt)\n', (7558, 7577), True, 'import numpy as np\n'), ((7587, 7615), 'numpy.arange', 'np.arange', (['classes'], {'dtype': 'dt'}), '(classes, dtype=dt)\n', (7596, 7615), True, 'import numpy as np\n'), ((8416, 8435), 'numpy.eye', 'np.eye', (['(1)'], {'dtype': 'dt'}), '(1, dtype=dt)\n', (8422, 8435), True, 'import numpy as np\n'), ((2750, 2763), 'numpy.sum', 'np.sum', (['exp_x'], {}), '(exp_x)\n', (2756, 2763), True, 'import numpy as np\n'), ((3147, 3177), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['expected_forward'], {'dtype': 'dt'}), '(expected_forward, dtype=dt)\n', (3149, 3177), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((3947, 3967), 'numpy.sum', 'np.sum', (['((t - o) ** 2)'], {}), '((t - o) ** 2)\n', (3953, 3967), True, 'import numpy as np\n'), ((4925, 4945), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['output'], {'dtype': 'dt'}), '(output, dtype=dt)\n', (4927, 4945), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, unittest_helper\n'), ((4974, 4992), 'cntk.ops.tests.ops_test_utils.AA', 'AA', (['gain'], {'dtype': 'dt'}), '(gain, dtype=dt)\n', (4976, 4992), False, 'from cntk.ops.tests.ops_test_utils import _test_binary_op, AA, precision, PRECISION_TO_TYPE, 
unittest_helper\n'), ((5021, 5040), 'numpy.ones_like', 'np.ones_like', (['score'], {}), '(score)\n', (5033, 5040), True, 'import numpy as np\n'), ((6592, 6620), 'cntk.glorot_uniform', 'C.glorot_uniform', ([], {'seed': '(98052)'}), '(seed=98052)\n', (6608, 6620), True, 'import cntk as C\n'), ((7649, 7689), 'cntk.random_sample', 'C.random_sample', (['q', '(32)', '(True)'], {'seed': '(98052)'}), '(q, 32, True, seed=98052)\n', (7664, 7689), True, 'import cntk as C\n'), ((6167, 6200), 'numpy.arange', 'np.arange', (['(batch * xdim)'], {'dtype': 'dt'}), '(batch * xdim, dtype=dt)\n', (6176, 6200), True, 'import numpy as np\n'), ((6518, 6533), 'numpy.log', 'np.log', (['classes'], {}), '(classes)\n', (6524, 6533), True, 'import numpy as np\n'), ((7963, 7996), 'numpy.arange', 'np.arange', (['(batch * xdim)'], {'dtype': 'dt'}), '(batch * xdim, dtype=dt)\n', (7972, 7996), True, 'import numpy as np\n'), ((1208, 1231), 'numpy.log', 'np.log', (['s_max'], {'dtype': 'dt'}), '(s_max, dtype=dt)\n', (1214, 1231), True, 'import numpy as np\n'), ((2829, 2852), 'numpy.log', 'np.log', (['s_max'], {'dtype': 'dt'}), '(s_max, dtype=dt)\n', (2835, 2852), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Digital signal processing functions; pyo table, file, & sample conversions
"""
from __future__ import absolute_import, division, print_function
import os
import sys
import time
import numpy as np
from scipy.signal import butter, lfilter
try:
import pyo64 as pyo
except Exception:
import pyo
class PyoFormatException(Exception):
pass
# --- time-related helper functions --------------------------
# Ensure we have a high-resolution clock; code from PsychoPy (<NAME>)
if sys.platform == 'win32':
from ctypes import byref, c_int64, windll
_fcounter = c_int64()
_qpfreq = c_int64()
windll.Kernel32.QueryPerformanceFrequency(byref(_qpfreq))
_qpfreq = float(_qpfreq.value)
_winQPC = windll.Kernel32.QueryPerformanceCounter
def get_time():
"""High-precision replacement for time.time() on Windows.
"""
_winQPC(byref(_fcounter))
return _fcounter.value / _qpfreq
else:
import timeit
get_time = timeit.default_timer
MIN_SLEEP = 0.001 # used in sleep() function
def sleep(sec=0):
"""Use time.sleep with a minimum duration sleep threshold.
"""
time.sleep(max(MIN_SLEEP, sec))
# --- digital signal processing helper functions --------------------------
_butter_cache = {}
def _butter(order, band, rate=44100):
"""Cache-ing version of scipy.signal's butter().
Allows faster band-pass filtering during real-time processing.
"""
global _butter_cache
_h = hash((order, band, rate))
if not _h in _butter_cache:
low, high = band
nyqfreq = float(rate) / 2
lowf = low / nyqfreq
highf = high / nyqfreq
_butter_cache[_h] = butter(order, (lowf, highf), btype='band')
return _butter_cache[_h]
def bandpass_pre_cache(lows=(80, 100, 120),
highs=(1200, 3000, 8000),
bands=((2000, 8000),), # content-filtered speech
rate=44100):
"""Call _butter now to cache some useful (b, a) values.
"""
for low in lows:
for high in highs:
_butter(6, (low, high), rate=rate)
for band in bands:
_butter(6, band, rate=rate)
def bandpass(data, low=80, high=1200, rate=44100, order=6):
"""Return bandpass filtered `data`.
"""
b, a = _butter(order, (low, high), rate)
return lfilter(b, a, data)
def rms(data):
"""Basic audio-power measure: root-mean-square of data.
Identical to `std` when the mean is zero; faster to compute just rms.
"""
if data.dtype == np.int16:
md2 = data.astype(np.float) ** 2 # int16 wrap around --> negative
else:
md2 = data ** 2
return np.sqrt(np.mean(md2))
def std(data):
"""Like rms, but also subtracts the mean (= slower).
"""
return np.std(data)
def smooth(data, win=16, tile=True):
"""Running smoothed average, via convolution over `win` window-size.
`tile` with the mean at start and end by default; otherwise replace with 0.
"""
weights = np.ones(win) / win
data_c = np.convolve(data, weights)[win - 1:-(win - 1)]
if tile:
pre = np.tile(data_c[0], win // 2)
post = np.tile(data_c[-1], win // 2)
else:
pre = post = np.zeros(win // 2)
data_pre_c = np.concatenate((pre, data_c))
data_pre_c_post = np.concatenate((data_pre_c, post))
return data_pre_c_post[:len(data)]
def zero_crossings(data):
"""Return a vector of length n-1 of zero-crossings within vector `data`.
1 if the adjacent values switched sign, or
0 if they stayed the same sign.
"""
zx = np.zeros(len(data))
zx[np.where(data[:-1] * data[1:] < 0)] = 1
return zx
def tone(freq=440, sec=2, rate=44100, vol=.99):
"""Return a np.array suitable for use as a tone (pure sine wave).
"""
samples = sec * rate
time_steps = np.arange(0., 1., 1. / samples)
scaling = 2 * np.pi * freq * sec
return np.sin(time_steps * scaling) * vol
def apodize(data, ms=5, rate=44100):
"""Apply a Hanning window (5ms) to reduce a sound's 'click' onset / offset.
"""
hw_size = int(min(rate // (1000 / ms), len(data) // 15))
hanning_window = np.hanning(2 * hw_size + 1)
data[:hw_size] *= hanning_window[:hw_size]
data[-hw_size:] *= hanning_window[-hw_size:]
return data
# --- pyo helper functions ------------------------------------------------
# format codes for _get_pyo_codes():
pyo_formats = {'wav': 0, 'aif': 1, 'aiff': 1, 'au': 2, 'raw': 3,
'sd2': 4, 'flac': 5, 'caf': 6, 'ogg': 7}
pyo_dtype = {'int16': 0, 'int24': 1, 'int32': 2, 'float32': 3,
'float64': 4, 'U-Law': 5, 'A-Law': 6}
def _get_pyo_codes(fmt='', dtype='int16', file_out=''):
"""Convert file and data formats to int codes, e.g., wav int16 -> (0, 0).
"""
if not fmt:
dot_ext = os.path.splitext(file_out)[1]
fmt = dot_ext.lower().strip('.')
if fmt in pyo_formats:
file_fmt = pyo_formats[fmt]
else:
msg = 'format `{0}` not supported'.format(file_out)
raise PyoFormatException(msg)
if fmt in ['sd2', 'flac']:
ok_dfmt = {'int16': 0, 'int24': 1}
else:
ok_dfmt = pyo_dtype
if dtype in ok_dfmt:
data_fmt = pyo_dtype[dtype]
else:
msg = 'data format `{0}` not supported for `{1}`'.format(
dtype, file_out)
raise PyoFormatException(msg)
return file_fmt, data_fmt
def samples_from_table(table, start=0, stop=-1, rate=44100):
"""Return samples as a np.array read from a pyo table.
A (start, stop) selection in seconds may require a non-default rate.
"""
samples = np.array(table.getTable())
if (start, stop) != (0, -1):
if stop > start:
samples = samples[start * rate:stop * rate]
elif start:
samples = samples[start * rate:]
return samples
def table_from_samples(samples, start=0, stop=-1, rate=44100):
"""Return a pyo DataTable constructed from samples.
A (start, stop) selection in seconds may require a non-default rate.
"""
if type(samples) == np.ndarray:
samples = samples.tolist()
if type(samples) != list:
raise TypeError('samples should be a list or np.array')
if (start, stop) != (0, -1):
if stop > start:
samples = samples[start * rate:stop * rate]
elif start:
samples = samples[start * rate:]
table = pyo.DataTable(size=len(samples), init=samples)
return table
def table_from_file(file_in, start=0, stop=-1):
"""Read data from files, any pyo format, returns (rate, pyo SndTable)
"""
table = pyo.SndTable()
try:
table.setSound(file_in, start=start, stop=stop)
except TypeError:
msg = 'bad file `{0}`, or format not supported'.format(file_in)
raise PyoFormatException(msg)
rate = pyo.sndinfo(file_in)[2]
return rate, table
def samples_from_file(file_in, start=0, stop=-1):
"""Read data from files, returns tuple (rate, np.array(.float64))
"""
if not os.path.isfile(file_in):
raise IOError('no such file `{0}`'.format(file_in))
rate, table = table_from_file(file_in, start=start, stop=stop)
return rate, np.array(table.getTable())
def samples_to_file(samples, rate, file_out, fmt='', dtype='int16'):
"""Write data to file, using requested format or infer from file .ext.
Only integer `rate` values are supported.
See http://ajaxsoundstudio.com/pyodoc/api/functions/sndfile.html
"""
file_fmt, data_fmt = _get_pyo_codes(fmt, dtype, file_out)
if type(samples) == np.ndarray:
samples = samples.tolist()
if type(samples) != list:
raise TypeError('samples should be a list or np.array')
try:
pyo.savefile(samples, path=file_out, sr=int(rate), channels=1,
fileformat=file_fmt, sampletype=data_fmt)
except Exception:
msg = 'could not save `{0}`; permissions or other issue?'
raise IOError(msg.format(file_out))
def table_to_file(table, file_out, fmt='', dtype='int16'):
"""Write data to file, using requested format or infer from file .ext.
"""
file_fmt, data_fmt = _get_pyo_codes(fmt, dtype, file_out)
try:
pyo.savefileFromTable(table=table, path=file_out,
fileformat=file_fmt, sampletype=data_fmt)
except Exception:
msg = 'could not save `{0}`; permissions or other issue?'
raise IOError(msg.format(file_out))
| [
"numpy.ones",
"os.path.isfile",
"numpy.mean",
"numpy.arange",
"numpy.tile",
"numpy.sin",
"numpy.convolve",
"ctypes.byref",
"scipy.signal.lfilter",
"numpy.std",
"numpy.hanning",
"scipy.signal.butter",
"pyo.SndTable",
"pyo.savefileFromTable",
"numpy.concatenate",
"numpy.zeros",
"numpy.... | [((625, 634), 'ctypes.c_int64', 'c_int64', ([], {}), '()\n', (632, 634), False, 'from ctypes import byref, c_int64, windll\n'), ((649, 658), 'ctypes.c_int64', 'c_int64', ([], {}), '()\n', (656, 658), False, 'from ctypes import byref, c_int64, windll\n'), ((2388, 2407), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (2395, 2407), False, 'from scipy.signal import butter, lfilter\n'), ((2834, 2846), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (2840, 2846), True, 'import numpy as np\n'), ((3309, 3338), 'numpy.concatenate', 'np.concatenate', (['(pre, data_c)'], {}), '((pre, data_c))\n', (3323, 3338), True, 'import numpy as np\n'), ((3361, 3395), 'numpy.concatenate', 'np.concatenate', (['(data_pre_c, post)'], {}), '((data_pre_c, post))\n', (3375, 3395), True, 'import numpy as np\n'), ((3892, 3926), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0)', '(1.0 / samples)'], {}), '(0.0, 1.0, 1.0 / samples)\n', (3901, 3926), True, 'import numpy as np\n'), ((4216, 4243), 'numpy.hanning', 'np.hanning', (['(2 * hw_size + 1)'], {}), '(2 * hw_size + 1)\n', (4226, 4243), True, 'import numpy as np\n'), ((6686, 6700), 'pyo.SndTable', 'pyo.SndTable', ([], {}), '()\n', (6698, 6700), False, 'import pyo\n'), ((705, 719), 'ctypes.byref', 'byref', (['_qpfreq'], {}), '(_qpfreq)\n', (710, 719), False, 'from ctypes import byref, c_int64, windll\n'), ((1724, 1766), 'scipy.signal.butter', 'butter', (['order', '(lowf, highf)'], {'btype': '"""band"""'}), "(order, (lowf, highf), btype='band')\n", (1730, 1766), False, 'from scipy.signal import butter, lfilter\n'), ((2727, 2739), 'numpy.mean', 'np.mean', (['md2'], {}), '(md2)\n', (2734, 2739), True, 'import numpy as np\n'), ((3062, 3074), 'numpy.ones', 'np.ones', (['win'], {}), '(win)\n', (3069, 3074), True, 'import numpy as np\n'), ((3094, 3120), 'numpy.convolve', 'np.convolve', (['data', 'weights'], {}), '(data, weights)\n', (3105, 3120), True, 'import numpy as np\n'), ((3168, 3196), 'numpy.tile', 
'np.tile', (['data_c[0]', '(win // 2)'], {}), '(data_c[0], win // 2)\n', (3175, 3196), True, 'import numpy as np\n'), ((3212, 3241), 'numpy.tile', 'np.tile', (['data_c[-1]', '(win // 2)'], {}), '(data_c[-1], win // 2)\n', (3219, 3241), True, 'import numpy as np\n'), ((3273, 3291), 'numpy.zeros', 'np.zeros', (['(win // 2)'], {}), '(win // 2)\n', (3281, 3291), True, 'import numpy as np\n'), ((3668, 3702), 'numpy.where', 'np.where', (['(data[:-1] * data[1:] < 0)'], {}), '(data[:-1] * data[1:] < 0)\n', (3676, 3702), True, 'import numpy as np\n'), ((3972, 4000), 'numpy.sin', 'np.sin', (['(time_steps * scaling)'], {}), '(time_steps * scaling)\n', (3978, 4000), True, 'import numpy as np\n'), ((6909, 6929), 'pyo.sndinfo', 'pyo.sndinfo', (['file_in'], {}), '(file_in)\n', (6920, 6929), False, 'import pyo\n'), ((7097, 7120), 'os.path.isfile', 'os.path.isfile', (['file_in'], {}), '(file_in)\n', (7111, 7120), False, 'import os\n'), ((8290, 8385), 'pyo.savefileFromTable', 'pyo.savefileFromTable', ([], {'table': 'table', 'path': 'file_out', 'fileformat': 'file_fmt', 'sampletype': 'data_fmt'}), '(table=table, path=file_out, fileformat=file_fmt,\n sampletype=data_fmt)\n', (8311, 8385), False, 'import pyo\n'), ((925, 941), 'ctypes.byref', 'byref', (['_fcounter'], {}), '(_fcounter)\n', (930, 941), False, 'from ctypes import byref, c_int64, windll\n'), ((4886, 4912), 'os.path.splitext', 'os.path.splitext', (['file_out'], {}), '(file_out)\n', (4902, 4912), False, 'import os\n')] |
#%%
import os
import json
import pathlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import tsplot, path2valuestimes
from rliable import library as rly
from rliable import metrics
from rliable import plot_utils
paths_and_names = [
("diagonal_runs/", "Diagonal"),
("hessianfree_runs/", "HF"),
("kfac_runs/", "KFAC"),
("ekfac_runs/", "EKFAC"),
("tengradv2_runs/", "TENGraD"),
]
# %%
def analyze(runs_path, speed_runs_path):
bp, suffix = runs_path, ""
f = lambda hp: True
#### make it look nice
env_paths = [
pathlib.Path(f"{bp}/HalfCheetah-v2{suffix}/"),
pathlib.Path(f"{bp}/Ant-v2{suffix}/"),
pathlib.Path(f"{bp}/Hopper-v2{suffix}/"),
pathlib.Path(f"{bp}/Humanoid-v2{suffix}/"),
pathlib.Path(f"{bp}/Walker2d-v2{suffix}/"),
pathlib.Path(f"{bp}/Reacher-v2{suffix}/"),
pathlib.Path(f"{bp}/Swimmer-v2{suffix}/"),
]
limit = {
"HalfCheetah-v2": [-1000],
"Ant-v2": [-1000, 3000],
"Hopper-v2": [-100],
"Humanoid-v2": [0],
"Walker2d-v2": [-100],
"Reacher-v2": [-150, 20],
"Swimmer-v2": [-20],
}
plot_time = False
fig, axes = plt.subplots(2, 4, figsize=(20, 10))
fvalues_performance = {}
fvalues_stability = {}
fvalues_threshold = {}
fvalues_seeds = {}
if not os.path.isfile("json/performance_thresholds.json"):
assert (
name == "baseline"
), "thresholds should be extracted from baseline performance runs"
## extract threshold values
env_2_threshold = {}
for i, (ax, rp) in enumerate(zip(axes.flatten(), env_paths)):
print(f"----- {rp} -----")
data_and_names = []
for main_path, n in paths_and_names:
data = (path2valuestimes(rp, main_path, f, hparams=True), n)
data_and_names.append(data)
# the best mean score of the lowest ranking optimizer
threshold = min([v.mean(0).max(0) for (v, _, _, _), _ in data_and_names])
env_2_threshold[rp.name] = threshold
# save the thresholds for future comparison
print("saving performance threshold values...")
with open("json/performance_thresholds.json", "w") as file:
json.dump(env_2_threshold, file, indent=4, sort_keys=True)
else:
with open("json/performance_thresholds.json", "r") as file:
env_2_threshold = json.load(file)
for i, (ax, rp) in enumerate(zip(axes.flatten(), env_paths)):
print(f"----- {rp} -----")
data_and_names = []
for main_path, n in paths_and_names:
data = (path2valuestimes(rp, main_path, f, hparams=True), n)
data_and_names.append(data)
colors = plt.cm.rainbow(np.linspace(0, 1, len(data_and_names)))
colidx = str(rp.name).split("_")[0]
fvalues_threshold[colidx] = {}
fvalues_performance[colidx] = {}
fvalues_stability[colidx] = {}
fvalues_seeds[colidx] = {}
threshold = env_2_threshold[colidx]
for ((values, _, steps, hps), n), c in zip(data_and_names, colors):
time = steps[0]
time *= hps[0]["num_envs"] * hps[0]["num_steps"]
std = values.std(0)
# ci_95 = 1.96 * std / (values.shape[0] ** 0.5)
fvalues_stability[colidx][n] = -std.mean(0) # larger values = worse
fvalues_performance[colidx][n] = values.mean(0).max(
0
) # larger performance = better
fvalues_seeds[colidx][n] = len(hps)
idx = (values.mean(0) >= threshold).nonzero()[0]
if len(idx) > 0:
idx = idx[0]
fvalues_threshold[colidx][n] = (
-time[idx] / 1000.0
) # more env steps = worse score
else:
fvalues_threshold[colidx][n] = np.nan
tsplot(values, time, ax=ax, label=f"{n}", color=c)
env_name = rp.name
ax.set_title(env_name)
if len(limit[env_name]) == 1:
ax.set_ylim(bottom=limit[env_name][0])
else:
ax.set_ylim(limit[env_name])
# ax.set_xlim([0, 1e6])
if i == 0:
ax.set_xlabel("Time (ms)" if plot_time else "Number of Environment Steps")
ax.set_ylabel("Average Return")
ax.legend()
axes.flatten()[-1].axis('off')
for ax in axes.flatten()[-4:-1]:
pos = ax.get_position()
pos.x0 += 0.1
pos.x1 += 0.1
ax.set_position(pos)
fig.savefig("performance.pdf", bbox_inches='tight')
plt.show()
speed = get_speed_df(speed_runs_path)
#%%
perf = pd.DataFrame(data=fvalues_performance).T
stab = pd.DataFrame(data=fvalues_stability).T
threshold = pd.DataFrame(data=fvalues_threshold).T
seeds = pd.DataFrame(data=fvalues_seeds).T
return perf, stab, threshold, speed, seeds
# normalize performance of each env so the scores are equally weighted
def get_normalized_perf(perf):
for (c, maxv), (c2, minv) in zip(
perf.max(axis=1).items(), perf.min(axis=1).items()
):
assert c == c2
perf.loc[c] = (perf.loc[c] - minv) / (maxv - minv)
scores = (perf.mean(0) * 100).round(2)
return scores
def mean_df(perf, stab, threshold, speed, seeds, name, save=False):
pathlib.Path(f"csv/{name}").mkdir(exist_ok=True, parents=True)
names = ["perf", "stab", "threshold", "speed"]
if save:
for metric_n, df in zip(names, [perf, stab, threshold, speed]):
df.to_csv(f"csv/{name}/{name}_{metric_n}.csv")
data = {
"Performance": dict(perf.mean(0)),
"Stability": dict(stab.mean(0)),
"Sample Efficiency": dict(threshold.mean(0)),
"Speed": dict(speed.mean(0)),
"Norm_Performance": dict(get_normalized_perf(perf.copy()))
# 'Seeds': dict(seeds.mean(0)),
}
df = pd.DataFrame(data)
# df = df.round(0)
for c in df.columns:
if c == "Speed":
df[c] = df[c].round(3)
elif c == "Norm_Performance":
df[c] = df[c].round(2)
elif not (c == "Seeds"):
df[c] = df[c].round(0)
if save:
df.to_csv(f"csv/{name}/{name}_all.csv")
return df
def load(name):
perf = pd.read_csv(f"csv/{name}/{name}_perf.csv", index_col=0)
stab = pd.read_csv(f"csv/{name}/{name}_stab.csv", index_col=0)
threshold = pd.read_csv(f"csv/{name}/{name}_threshold.csv", index_col=0)
speed = pd.read_csv(f"csv/{name}/{name}_speed.csv", index_col=0)
df = pd.read_csv(f"csv/{name}/{name}_all.csv", index_col=0)
return (perf, stab, threshold, speed), df
def get_speed_df(runs_path):
bp, suffix = runs_path, ""
f = lambda hp: True
env_paths = [
pathlib.Path(f"{bp}/HalfCheetah-v2{suffix}/"),
pathlib.Path(f"{bp}/Ant-v2{suffix}/"),
pathlib.Path(f"{bp}/Hopper-v2{suffix}/"),
pathlib.Path(f"{bp}/Humanoid-v2{suffix}/"),
pathlib.Path(f"{bp}/Walker2d-v2{suffix}/"),
pathlib.Path(f"{bp}/Reacher-v2{suffix}/"),
pathlib.Path(f"{bp}/Swimmer-v2{suffix}/"),
]
fvalues_time = {}
for i, rp in enumerate(env_paths):
colidx = str(rp.name).split("_")[0]
fvalues_time[colidx] = {}
data_and_names = []
for main_path, n in paths_and_names:
data = (path2valuestimes(rp, main_path, f, hparams=True), n)
data_and_names.append(data)
for (values, times, steps, hps), n in data_and_names:
time = times[0][-1]
fvalues_time[colidx][n] = -time
speed = pd.DataFrame(data=fvalues_time).T
return speed
# %%
read_csv_data = False
save = False
# %%
runs_path = pathlib.Path("../runs/5e6_baseline/")
speed_runs_path = pathlib.Path("../runs/baseline_speed/")
name = "baseline"
if read_csv_data:
baseline_data, baseline_df = load(name)
else:
baseline_data = analyze(runs_path, speed_runs_path)
baseline_df = mean_df(*baseline_data, name, save=save)
baseline_df
# latex(baseline_df)
#%%
runs_path = pathlib.Path("../runs/5e6_best_batch/")
speed_runs_path = pathlib.Path("../runs/batch_speed/")
name = "best_batch"
if read_csv_data:
batch_data, batch_df = load(name)
else:
batch_data = analyze(runs_path, speed_runs_path)
batch_df = mean_df(*batch_data, name, save=save)
batch_df
#%%
runs_path = pathlib.Path("../runs/5e6_best_critic/")
speed_runs_path = pathlib.Path("../runs/critic_speed/")
name = "best_critic"
if read_csv_data:
critic_data, critic_df = load(name)
else:
critic_data = analyze(runs_path, speed_runs_path)
critic_df = mean_df(*critic_data, name, save=save)
critic_df
# %%
def full_summary(data1, data2):
names = ["perf", "stab", "threshold"]
dfs = []
for n in names:
df = percent_improvement_metric(n, data1, data2)
dfs.append(df)
df = df.copy()
for (i, p_row), (j, s_row), (k, t_row) in zip(*[d.iterrows() for d in dfs]):
for p, s, t in zip(p_row.items(), s_row.items(), t_row.items()):
row, p = p
s, t = s[1], t[1]
p, s, t = [v.split("(")[-1][:-1] for v in (p, s, t)]
df[row][i] = f"({p}, {s}, {t})"
return df
# improvement from df1 -> df2
def percent_improvement(df1, df2):
df_improve = (df2.abs() - df1.abs()) / df1 * 100.0
for (i, r_base_df2), (j, r_improv), (l, r_base_df1) in zip(
df2.iterrows(), df_improve.iterrows(), df1.iterrows()
):
for k in r_base_df2.keys():
if np.isnan(r_base_df2[k]): # df2 doesnt acheive score / baseline
r_base_df2[k] = f"NaN"
elif np.isnan(r_base_df1[k]): # df1 achieves score but df2 doesnt
r_base_df2[k] = f"{r_base_df2[k] :.0f} (!)"
else:
r_base_df2[
k
] = f"{r_base_df2[k] :.0f} ({'+' if r_improv[k] >= 0 else ''}{r_improv[k] :.0f}%)"
df_improve.loc[j] = r_base_df2
return df_improve
col2idx = {
"perf": 0,
"stab": 1,
"threshold": 2,
"speed": 3,
}
def percent_improvement_metric(data1, data2, col):
idx = col2idx[col]
return percent_improvement(data1[idx], data2[idx])
#%%
def full_improvements_approx(approx_name, data1, data2):
col_names = ["Performance", "Stability", "Sample Efficiency", "Speed"]
metric_names = ["perf", "stab", "threshold", "speed"]
dfs = []
for col_name, metric_name in zip(col_names, metric_names):
df = percent_improvement_metric(data1, data2, metric_name)
df = df[[approx_name]]
df.columns = [col_name]
dfs.append(df)
df = pd.concat(dfs, axis=1)
return df
def latex(df):
print(df.to_latex())
def rliable_aggregate_metrics():
runs_path = pathlib.Path("../runs/5e6_baseline/")
bp, suffix = runs_path, ""
f = lambda _: True
env_paths = [
pathlib.Path(f"{bp}/HalfCheetah-v2{suffix}/"),
pathlib.Path(f"{bp}/Ant-v2{suffix}/"),
pathlib.Path(f"{bp}/Hopper-v2{suffix}/"),
pathlib.Path(f"{bp}/Humanoid-v2{suffix}/"),
pathlib.Path(f"{bp}/Walker2d-v2{suffix}/"),
pathlib.Path(f"{bp}/Reacher-v2{suffix}/"),
pathlib.Path(f"{bp}/Swimmer-v2{suffix}/"),
]
colors = plt.cm.rainbow(np.linspace(0, 1, len(paths_and_names)))
# algorithm -> env scores dict
color_dict = {}
data_and_names = {}
for (main_path, n), col in zip(paths_and_names, colors):
data_and_names[n] = []
color_dict[n] = col
for i, rp in enumerate(env_paths):
(values, _, steps, hps) = path2valuestimes(rp, main_path, f, hparams=True)
data_and_names[n].append(values.max(-1))
# normalize the scores based on best total score in env
for i in range(len(env_paths)):
best_score = -float("inf")
worst_score = float("inf")
for n in data_and_names:
best_score = max(data_and_names[n][i].max(), best_score)
worst_score = min(data_and_names[n][i].min(), worst_score)
for n in data_and_names:
data_and_names[n][i] = (data_and_names[n][i] - worst_score) / (
best_score - worst_score
)
# n_runs x n_games matrix
for n in data_and_names:
data_and_names[n] = np.array(data_and_names[n]).T
#%%
# compute the results
algorithms = list(data_and_names.keys())
aggregate_func = lambda x: np.array(
[
metrics.aggregate_median(x),
metrics.aggregate_iqm(x),
metrics.aggregate_mean(x),
metrics.aggregate_optimality_gap(x),
]
)
aggregate_scores, aggregate_score_cis = rly.get_interval_estimates(
data_and_names, aggregate_func, reps=50000
)
#%%
fig, axes = plot_utils.plot_interval_estimates(
aggregate_scores,
aggregate_score_cis,
metric_names=["Median", "IQM", "Mean", "Optimality Gap"],
algorithms=algorithms,
xlabel="Normalized Performance Scores",
colors=color_dict,
xlabel_y_coordinate=-0.18,
)
fig.savefig("metrics.pdf", bbox_inches='tight')
return fig
#%%
fig = rliable_aggregate_metrics()
plt.show()
#%%
if save:
save_path = pathlib.Path("csv/improvements/")
save_path.mkdir(exist_ok=True, parents=True)
(save_path / "batch/").mkdir(exist_ok=True)
(save_path / "critic/").mkdir(exist_ok=True)
for _, approx_name in paths_and_names:
df = full_improvements_approx(approx_name, baseline_data, batch_data)
df.to_csv(f"{save_path}/batch/batch_improve_{approx_name}.csv")
df = full_improvements_approx(approx_name, baseline_data, critic_data)
df.to_csv(f"{save_path}/critic/critic_improve_{approx_name}.csv")
#%%
print("--- BATCH SIZE IMPROVEMENTS ---")
for _, approx_name in paths_and_names:
print(f"---- {approx_name} ----")
df = full_improvements_approx(approx_name, baseline_data, batch_data)
latex(df)
#%%
print("--- CRITIC + BATCH SIZE IMPROVEMENTS ---")
for _, approx_name in paths_and_names:
print(f"---- {approx_name} ----")
df = full_improvements_approx(approx_name, baseline_data, critic_data)
latex(df)
# %%
print(percent_improvement(baseline_df, critic_df).to_latex())
# %%
percent_improvement(batch_df, critic_df)
# %%
percent_improvement_metric("perf", baseline_data, batch_data)
#%%
percent_improvement_metric("perf", batch_data, critic_data)
| [
"rliable.metrics.aggregate_iqm",
"pandas.read_csv",
"numpy.isnan",
"pathlib.Path",
"os.path.isfile",
"rliable.metrics.aggregate_median",
"utils.tsplot",
"pandas.DataFrame",
"rliable.metrics.aggregate_mean",
"rliable.library.get_interval_estimates",
"matplotlib.pyplot.subplots",
"pandas.concat"... | [((7815, 7852), 'pathlib.Path', 'pathlib.Path', (['"""../runs/5e6_baseline/"""'], {}), "('../runs/5e6_baseline/')\n", (7827, 7852), False, 'import pathlib\n'), ((7871, 7910), 'pathlib.Path', 'pathlib.Path', (['"""../runs/baseline_speed/"""'], {}), "('../runs/baseline_speed/')\n", (7883, 7910), False, 'import pathlib\n'), ((8162, 8201), 'pathlib.Path', 'pathlib.Path', (['"""../runs/5e6_best_batch/"""'], {}), "('../runs/5e6_best_batch/')\n", (8174, 8201), False, 'import pathlib\n'), ((8220, 8256), 'pathlib.Path', 'pathlib.Path', (['"""../runs/batch_speed/"""'], {}), "('../runs/batch_speed/')\n", (8232, 8256), False, 'import pathlib\n'), ((8471, 8511), 'pathlib.Path', 'pathlib.Path', (['"""../runs/5e6_best_critic/"""'], {}), "('../runs/5e6_best_critic/')\n", (8483, 8511), False, 'import pathlib\n'), ((8530, 8567), 'pathlib.Path', 'pathlib.Path', (['"""../runs/critic_speed/"""'], {}), "('../runs/critic_speed/')\n", (8542, 8567), False, 'import pathlib\n'), ((13300, 13310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13308, 13310), True, 'import matplotlib.pyplot as plt\n'), ((1226, 1262), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(4)'], {'figsize': '(20, 10)'}), '(2, 4, figsize=(20, 10))\n', (1238, 1262), True, 'import matplotlib.pyplot as plt\n'), ((4694, 4704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4702, 4704), True, 'import matplotlib.pyplot as plt\n'), ((6005, 6023), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (6017, 6023), True, 'import pandas as pd\n'), ((6380, 6435), 'pandas.read_csv', 'pd.read_csv', (['f"""csv/{name}/{name}_perf.csv"""'], {'index_col': '(0)'}), "(f'csv/{name}/{name}_perf.csv', index_col=0)\n", (6391, 6435), True, 'import pandas as pd\n'), ((6447, 6502), 'pandas.read_csv', 'pd.read_csv', (['f"""csv/{name}/{name}_stab.csv"""'], {'index_col': '(0)'}), "(f'csv/{name}/{name}_stab.csv', index_col=0)\n", (6458, 6502), True, 'import pandas as pd\n'), ((6519, 6579), 
'pandas.read_csv', 'pd.read_csv', (['f"""csv/{name}/{name}_threshold.csv"""'], {'index_col': '(0)'}), "(f'csv/{name}/{name}_threshold.csv', index_col=0)\n", (6530, 6579), True, 'import pandas as pd\n'), ((6592, 6648), 'pandas.read_csv', 'pd.read_csv', (['f"""csv/{name}/{name}_speed.csv"""'], {'index_col': '(0)'}), "(f'csv/{name}/{name}_speed.csv', index_col=0)\n", (6603, 6648), True, 'import pandas as pd\n'), ((6658, 6712), 'pandas.read_csv', 'pd.read_csv', (['f"""csv/{name}/{name}_all.csv"""'], {'index_col': '(0)'}), "(f'csv/{name}/{name}_all.csv', index_col=0)\n", (6669, 6712), True, 'import pandas as pd\n'), ((10738, 10760), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (10747, 10760), True, 'import pandas as pd\n'), ((10868, 10905), 'pathlib.Path', 'pathlib.Path', (['"""../runs/5e6_baseline/"""'], {}), "('../runs/5e6_baseline/')\n", (10880, 10905), False, 'import pathlib\n'), ((12778, 12848), 'rliable.library.get_interval_estimates', 'rly.get_interval_estimates', (['data_and_names', 'aggregate_func'], {'reps': '(50000)'}), '(data_and_names, aggregate_func, reps=50000)\n', (12804, 12848), True, 'from rliable import library as rly\n'), ((12888, 13141), 'rliable.plot_utils.plot_interval_estimates', 'plot_utils.plot_interval_estimates', (['aggregate_scores', 'aggregate_score_cis'], {'metric_names': "['Median', 'IQM', 'Mean', 'Optimality Gap']", 'algorithms': 'algorithms', 'xlabel': '"""Normalized Performance Scores"""', 'colors': 'color_dict', 'xlabel_y_coordinate': '(-0.18)'}), "(aggregate_scores, aggregate_score_cis,\n metric_names=['Median', 'IQM', 'Mean', 'Optimality Gap'], algorithms=\n algorithms, xlabel='Normalized Performance Scores', colors=color_dict,\n xlabel_y_coordinate=-0.18)\n", (12922, 13141), False, 'from rliable import plot_utils\n'), ((13341, 13374), 'pathlib.Path', 'pathlib.Path', (['"""csv/improvements/"""'], {}), "('csv/improvements/')\n", (13353, 13374), False, 'import pathlib\n'), ((591, 636), 'pathlib.Path', 
'pathlib.Path', (['f"""{bp}/HalfCheetah-v2{suffix}/"""'], {}), "(f'{bp}/HalfCheetah-v2{suffix}/')\n", (603, 636), False, 'import pathlib\n'), ((646, 683), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Ant-v2{suffix}/"""'], {}), "(f'{bp}/Ant-v2{suffix}/')\n", (658, 683), False, 'import pathlib\n'), ((693, 733), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Hopper-v2{suffix}/"""'], {}), "(f'{bp}/Hopper-v2{suffix}/')\n", (705, 733), False, 'import pathlib\n'), ((743, 785), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Humanoid-v2{suffix}/"""'], {}), "(f'{bp}/Humanoid-v2{suffix}/')\n", (755, 785), False, 'import pathlib\n'), ((795, 837), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Walker2d-v2{suffix}/"""'], {}), "(f'{bp}/Walker2d-v2{suffix}/')\n", (807, 837), False, 'import pathlib\n'), ((847, 888), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Reacher-v2{suffix}/"""'], {}), "(f'{bp}/Reacher-v2{suffix}/')\n", (859, 888), False, 'import pathlib\n'), ((898, 939), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Swimmer-v2{suffix}/"""'], {}), "(f'{bp}/Swimmer-v2{suffix}/')\n", (910, 939), False, 'import pathlib\n'), ((1382, 1432), 'os.path.isfile', 'os.path.isfile', (['"""json/performance_thresholds.json"""'], {}), "('json/performance_thresholds.json')\n", (1396, 1432), False, 'import os\n'), ((4768, 4806), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'fvalues_performance'}), '(data=fvalues_performance)\n', (4780, 4806), True, 'import pandas as pd\n'), ((4820, 4856), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'fvalues_stability'}), '(data=fvalues_stability)\n', (4832, 4856), True, 'import pandas as pd\n'), ((4875, 4911), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'fvalues_threshold'}), '(data=fvalues_threshold)\n', (4887, 4911), True, 'import pandas as pd\n'), ((4926, 4958), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'fvalues_seeds'}), '(data=fvalues_seeds)\n', (4938, 4958), True, 'import pandas as pd\n'), ((6871, 6916), 'pathlib.Path', 'pathlib.Path', 
(['f"""{bp}/HalfCheetah-v2{suffix}/"""'], {}), "(f'{bp}/HalfCheetah-v2{suffix}/')\n", (6883, 6916), False, 'import pathlib\n'), ((6926, 6963), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Ant-v2{suffix}/"""'], {}), "(f'{bp}/Ant-v2{suffix}/')\n", (6938, 6963), False, 'import pathlib\n'), ((6973, 7013), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Hopper-v2{suffix}/"""'], {}), "(f'{bp}/Hopper-v2{suffix}/')\n", (6985, 7013), False, 'import pathlib\n'), ((7023, 7065), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Humanoid-v2{suffix}/"""'], {}), "(f'{bp}/Humanoid-v2{suffix}/')\n", (7035, 7065), False, 'import pathlib\n'), ((7075, 7117), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Walker2d-v2{suffix}/"""'], {}), "(f'{bp}/Walker2d-v2{suffix}/')\n", (7087, 7117), False, 'import pathlib\n'), ((7127, 7168), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Reacher-v2{suffix}/"""'], {}), "(f'{bp}/Reacher-v2{suffix}/')\n", (7139, 7168), False, 'import pathlib\n'), ((7178, 7219), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Swimmer-v2{suffix}/"""'], {}), "(f'{bp}/Swimmer-v2{suffix}/')\n", (7190, 7219), False, 'import pathlib\n'), ((7704, 7735), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'fvalues_time'}), '(data=fvalues_time)\n', (7716, 7735), True, 'import pandas as pd\n'), ((10986, 11031), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/HalfCheetah-v2{suffix}/"""'], {}), "(f'{bp}/HalfCheetah-v2{suffix}/')\n", (10998, 11031), False, 'import pathlib\n'), ((11041, 11078), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Ant-v2{suffix}/"""'], {}), "(f'{bp}/Ant-v2{suffix}/')\n", (11053, 11078), False, 'import pathlib\n'), ((11088, 11128), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Hopper-v2{suffix}/"""'], {}), "(f'{bp}/Hopper-v2{suffix}/')\n", (11100, 11128), False, 'import pathlib\n'), ((11138, 11180), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Humanoid-v2{suffix}/"""'], {}), "(f'{bp}/Humanoid-v2{suffix}/')\n", (11150, 11180), False, 'import pathlib\n'), ((11190, 11232), 'pathlib.Path', 
'pathlib.Path', (['f"""{bp}/Walker2d-v2{suffix}/"""'], {}), "(f'{bp}/Walker2d-v2{suffix}/')\n", (11202, 11232), False, 'import pathlib\n'), ((11242, 11283), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Reacher-v2{suffix}/"""'], {}), "(f'{bp}/Reacher-v2{suffix}/')\n", (11254, 11283), False, 'import pathlib\n'), ((11293, 11334), 'pathlib.Path', 'pathlib.Path', (['f"""{bp}/Swimmer-v2{suffix}/"""'], {}), "(f'{bp}/Swimmer-v2{suffix}/')\n", (11305, 11334), False, 'import pathlib\n'), ((2325, 2383), 'json.dump', 'json.dump', (['env_2_threshold', 'file'], {'indent': '(4)', 'sort_keys': '(True)'}), '(env_2_threshold, file, indent=4, sort_keys=True)\n', (2334, 2383), False, 'import json\n'), ((2492, 2507), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2501, 2507), False, 'import json\n'), ((3974, 4024), 'utils.tsplot', 'tsplot', (['values', 'time'], {'ax': 'ax', 'label': 'f"""{n}"""', 'color': 'c'}), "(values, time, ax=ax, label=f'{n}', color=c)\n", (3980, 4024), False, 'from utils import tsplot, path2valuestimes\n'), ((5434, 5461), 'pathlib.Path', 'pathlib.Path', (['f"""csv/{name}"""'], {}), "(f'csv/{name}')\n", (5446, 5461), False, 'import pathlib\n'), ((9621, 9644), 'numpy.isnan', 'np.isnan', (['r_base_df2[k]'], {}), '(r_base_df2[k])\n', (9629, 9644), True, 'import numpy as np\n'), ((11693, 11741), 'utils.path2valuestimes', 'path2valuestimes', (['rp', 'main_path', 'f'], {'hparams': '(True)'}), '(rp, main_path, f, hparams=True)\n', (11709, 11741), False, 'from utils import tsplot, path2valuestimes\n'), ((12388, 12415), 'numpy.array', 'np.array', (['data_and_names[n]'], {}), '(data_and_names[n])\n', (12396, 12415), True, 'import numpy as np\n'), ((2703, 2751), 'utils.path2valuestimes', 'path2valuestimes', (['rp', 'main_path', 'f'], {'hparams': '(True)'}), '(rp, main_path, f, hparams=True)\n', (2719, 2751), False, 'from utils import tsplot, path2valuestimes\n'), ((7460, 7508), 'utils.path2valuestimes', 'path2valuestimes', (['rp', 'main_path', 'f'], {'hparams': 
'(True)'}), '(rp, main_path, f, hparams=True)\n', (7476, 7508), False, 'from utils import tsplot, path2valuestimes\n'), ((9741, 9764), 'numpy.isnan', 'np.isnan', (['r_base_df1[k]'], {}), '(r_base_df1[k])\n', (9749, 9764), True, 'import numpy as np\n'), ((12562, 12589), 'rliable.metrics.aggregate_median', 'metrics.aggregate_median', (['x'], {}), '(x)\n', (12586, 12589), False, 'from rliable import metrics\n'), ((12603, 12627), 'rliable.metrics.aggregate_iqm', 'metrics.aggregate_iqm', (['x'], {}), '(x)\n', (12624, 12627), False, 'from rliable import metrics\n'), ((12641, 12666), 'rliable.metrics.aggregate_mean', 'metrics.aggregate_mean', (['x'], {}), '(x)\n', (12663, 12666), False, 'from rliable import metrics\n'), ((12680, 12715), 'rliable.metrics.aggregate_optimality_gap', 'metrics.aggregate_optimality_gap', (['x'], {}), '(x)\n', (12712, 12715), False, 'from rliable import metrics\n'), ((1837, 1885), 'utils.path2valuestimes', 'path2valuestimes', (['rp', 'main_path', 'f'], {'hparams': '(True)'}), '(rp, main_path, f, hparams=True)\n', (1853, 1885), False, 'from utils import tsplot, path2valuestimes\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 21:30:59 2019
@author: charles
"""
import argparse
import numpy as np
from scipy.io.wavfile import read, write
import os
def normalize(x: np.ndarray) -> np.ndarray:
    """Scale *x* so that its largest element becomes 1.0.

    Divides by ``np.max(x)``: undefined when the maximum is 0, and an
    all-negative input flips sign (assumed not to occur for triggered
    key-press windows — TODO confirm).
    """
    return x / np.max(x)
def split_file(file, time_before=0.025, time_after=0.2, trigger=1000, normalize_result=False):
    """Cut a recording into fixed-length windows around each trigger event.

    Scans the signal left to right; whenever a sample exceeds *trigger*
    (and enough history exists for the pre-window), a window spanning
    ``time_before`` seconds before and ``time_after`` seconds after the
    trigger sample is extracted.  The scan then jumps past the extracted
    window so one key press yields a single segment.

    :param file: path of the WAV file to split
    :param time_before: seconds kept before each trigger sample
    :param time_after: seconds kept after each trigger sample
    :param trigger: amplitude threshold that marks a key press
    :param normalize_result: when True, each window is peak-normalized
    :return: (list of windows, sample rate)
    """
    sample_rate, raw = read(file)
    samples = np.array(raw, dtype=float)

    n_after = int(time_after * sample_rate)
    n_before = int(time_before * sample_rate)

    windows = []
    idx = 0
    while idx < samples.size:
        # Not enough signal left for a full post-trigger window.
        if idx + n_after > samples.size:
            break
        if samples[idx] > trigger and idx >= n_before:
            window = samples[idx - n_before:idx + n_after]
            if normalize_result:
                window = normalize(window)
            windows.append(window)
            idx += n_after  # jump past this key press
        idx += 1
    return windows, sample_rate
def _target_path(out_dir, label, n):
    """Return the output path for key-press number *n* of *label*."""
    return out_dir + "/" + label + "_" + str(n) + ".wav"


def main():
    """Parse the command line, split the recording and write one WAV per key press.

    Exits early (printing "ERROR!") when the number of detected segments is
    not a multiple of the number of labels.
    """
    parser = argparse.ArgumentParser(description='Split key presses recording.')
    parser.add_argument('--input', type=str, help='Input WAV file')
    parser.add_argument('--out-dir', type=str, help='Output directory')
    parser.add_argument('--label', type=str, help='Output files prefix')
    parser.add_argument('--split-label-char', type=str, default='', help='Char to split the label string')
    parser.add_argument('--trigger', type=float, default=1000, help='Trigger threshold')
    parser.add_argument('--time_before', type=float, default=0.025, help='Samples to keep before triggers (s)')
    parser.add_argument('--time_after', type=float, default=0.2, help='Samples to keep after triggers (s)')
    args = parser.parse_args()

    outputs, sample_rate = split_file(args.input, args.time_before, args.time_after, args.trigger)

    # One label per segment, cycling through labels when there are more segments.
    if args.split_label_char == '':
        labels = [args.label]
    else:
        labels = str.split(args.label, args.split_label_char)
    if len(outputs) % len(labels):
        print("ERROR!")
        return

    n = 0  # persists across segments so existing files are never overwritten
    for i, output in enumerate(outputs):
        label = labels[i % len(labels)]
        # Skip forward past any file names already taken for this counter value.
        while os.path.isfile(_target_path(args.out_dir, label, n)):
            n += 1
        target = _target_path(args.out_dir, label, n)
        write(target, sample_rate, np.asarray(output, dtype=np.int16))
        print('Created ' + target + '!')


if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"numpy.asarray",
"scipy.io.wavfile.read",
"numpy.max",
"numpy.array"
] | [((402, 412), 'scipy.io.wavfile.read', 'read', (['file'], {}), '(file)\n', (406, 412), False, 'from scipy.io.wavfile import read, write\n'), ((421, 445), 'numpy.array', 'np.array', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (429, 445), True, 'import numpy as np\n'), ((1065, 1132), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Split key presses recording."""'}), "(description='Split key presses recording.')\n", (1088, 1132), False, 'import argparse\n'), ((240, 249), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (246, 249), True, 'import numpy as np\n'), ((2381, 2415), 'numpy.asarray', 'np.asarray', (['output'], {'dtype': 'np.int16'}), '(output, dtype=np.int16)\n', (2391, 2415), True, 'import numpy as np\n')] |
# TODO: Write more tests!
import numpy as np
from tests.plottools import popplot
from nmrtools.nmrplot import dnmrplot_2spin
from tests.testdata import TWOSPIN_SLOW, AB_WINDNMR # , TWOSPIN_COALESCE, TWOSPIN_FAST
from nmrtools.nmrmath import two_spin, d2s_func, TwoSinglets, dnmr_AB
def get_intensity(spectrum, x):
    """
    Look up the intensity of the data point whose frequency is closest to x.

    No interpolation is performed: the single nearest sample wins, even
    when x falls between two grid points.

    :param spectrum: tuple of (x, y) arrays for frequency, intensity data
    :param x: frequency lookup
    :return: the intensity at that frequency
    """
    frequencies, intensities = spectrum
    nearest = np.abs(frequencies - x).argmin()
    return intensities[nearest]
def get_maxima(spectrum):
    """
    Crude function that returns maxima in the spectrum.

    A point is reported as a maximum when it is strictly higher than both
    immediate neighbors; the first and last points can never qualify.

    :param spectrum: tuple of frequency, intensity arrays
    :return: a list of (frequency, intensity) tuples for individual maxima.
    """
    res = []
    # Interior points only: slice [1:-1] visits indices 1 .. len-2, each of
    # which has both a left and a right neighbor.  (The previous [1:-2]
    # slice stopped one point early and could miss a maximum at len-2.)
    for n, val in enumerate(spectrum[1][1:-1]):
        index = n + 1  # start at spectrum[1][1]
        lastvalue = spectrum[1][index - 1]
        nextvalue = spectrum[1][index + 1]
        if lastvalue < val and nextvalue < val:
            print('MAXIMUM FOUND AT: ')
            print((spectrum[0][index], val))
            res.append((spectrum[0][index], val))
    return res
def test_two_spin_slow_exchange():
    """Accepted maxima of the slow-exchange reference spectrum must match two_spin()."""
    accepted_peaks = get_maxima(TWOSPIN_SLOW)
    print("Maxima: ", accepted_peaks)
    params = (165, 135, 1.5, 0.5, 0.5, 0.5)
    frequencies = np.linspace(85, 215, 800)
    popplot(frequencies, two_spin(frequencies, *params))
    for freq, amp in accepted_peaks:
        print('Testing vs. accepted peak at: ', (freq, amp))
        simulated = two_spin(freq, *params)
        print('i.e. input of frequency ', freq, ' should give output of '
              'intensity ', amp)
        print('Calculated intensity is actually: ', simulated)
        np.testing.assert_almost_equal(simulated, amp)
def test_d2s_func_slow_exchange():
    """The closure returned by d2s_func must reproduce the accepted maxima of TWOSPIN_SLOW."""
    accepted_peaks = get_maxima(TWOSPIN_SLOW)
    print("Maxima: ", accepted_peaks)
    calc = d2s_func(165, 135, 1.5, 0.5, 0.5, 0.5)
    frequencies = np.linspace(85, 215, 800)
    popplot(frequencies, calc(frequencies))
    print('Testing intensity calculator on 135: ', calc(135))
    print('Testing intensity calculator on 165: ', calc(165))
    for freq, amp in accepted_peaks:
        print('Testing vs. accepted peak at: ', (freq, amp))
        simulated = calc(freq)
        print('i.e. input of frequency ', freq, ' should give output of '
              'intensity ', amp)
        print('Calculated intensity is actually: ', simulated)
        np.testing.assert_almost_equal(simulated, amp)
def test_TwoSinglets_slow_exchange():
    """A TwoSinglets simulation must reproduce the accepted maxima of TWOSPIN_SLOW."""
    accepted_peaks = get_maxima(TWOSPIN_SLOW)
    print("Maxima: ", accepted_peaks)
    sim = TwoSinglets(165, 135, 1.5, 0.5, 0.5, 50)
    popplot(*sim.spectrum())
    print('Testing intensity calculator on 135: ', sim.intensity(135))
    print('Testing intensity calculator on 165: ', sim.intensity(165))
    for freq, amp in accepted_peaks:
        print('Testing vs. accepted peak at: ', (freq, amp))
        simulated = sim.intensity(freq)
        print('i.e. input of frequency ', freq, ' should give output of '
              'intensity ', amp)
        print('Calculated intensity is actually: ', simulated)
        np.testing.assert_almost_equal(simulated, amp)
def test_ab_WINDNMR_defaults():
    """dnmr_AB with WINDNMR default parameters must reproduce the accepted AB maxima."""
    accepted_peaks = get_maxima(AB_WINDNMR)
    print("Maxima: ", accepted_peaks)
    params = (165, 135, 12, 12, 0.5)
    for freq, amp in accepted_peaks:
        print('Testing vs. accepted peak at: ', (freq, amp))
        simulated = dnmr_AB(freq, *params)
        print('i.e. input of frequency ', freq, ' should give output of '
              'intensity ', amp)
        print('Calculated intensity is actually: ', simulated)
        np.testing.assert_almost_equal(simulated, amp)
| [
"numpy.abs",
"numpy.testing.assert_almost_equal",
"nmrtools.nmrmath.dnmr_AB",
"nmrtools.nmrmath.two_spin",
"numpy.linspace",
"tests.plottools.popplot",
"nmrtools.nmrmath.TwoSinglets",
"nmrtools.nmrmath.d2s_func"
] | [((1546, 1571), 'numpy.linspace', 'np.linspace', (['(85)', '(215)', '(800)'], {}), '(85, 215, 800)\n', (1557, 1571), True, 'import numpy as np\n'), ((1580, 1598), 'nmrtools.nmrmath.two_spin', 'two_spin', (['x', '*args'], {}), '(x, *args)\n', (1588, 1598), False, 'from nmrtools.nmrmath import two_spin, d2s_func, TwoSinglets, dnmr_AB\n'), ((1603, 1616), 'tests.plottools.popplot', 'popplot', (['x', 'y'], {}), '(x, y)\n', (1610, 1616), False, 'from tests.plottools import popplot\n'), ((2205, 2243), 'nmrtools.nmrmath.d2s_func', 'd2s_func', (['(165)', '(135)', '(1.5)', '(0.5)', '(0.5)', '(0.5)'], {}), '(165, 135, 1.5, 0.5, 0.5, 0.5)\n', (2213, 2243), False, 'from nmrtools.nmrmath import two_spin, d2s_func, TwoSinglets, dnmr_AB\n'), ((2253, 2278), 'numpy.linspace', 'np.linspace', (['(85)', '(215)', '(800)'], {}), '(85, 215, 800)\n', (2264, 2278), True, 'import numpy as np\n'), ((2315, 2328), 'tests.plottools.popplot', 'popplot', (['x', 'y'], {}), '(x, y)\n', (2322, 2328), False, 'from tests.plottools import popplot\n'), ((3072, 3112), 'nmrtools.nmrmath.TwoSinglets', 'TwoSinglets', (['(165)', '(135)', '(1.5)', '(0.5)', '(0.5)', '(50)'], {}), '(165, 135, 1.5, 0.5, 0.5, 50)\n', (3083, 3112), False, 'from nmrtools.nmrmath import two_spin, d2s_func, TwoSinglets, dnmr_AB\n'), ((1726, 1750), 'nmrtools.nmrmath.two_spin', 'two_spin', (['peak[0]', '*args'], {}), '(peak[0], *args)\n', (1734, 1750), False, 'from nmrtools.nmrmath import two_spin, d2s_func, TwoSinglets, dnmr_AB\n'), ((1949, 2010), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['calculated_intensity', 'peak[1]'], {}), '(calculated_intensity, peak[1])\n', (1979, 2010), True, 'import numpy as np\n'), ((2823, 2884), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['calculated_intensity', 'peak[1]'], {}), '(calculated_intensity, peak[1])\n', (2853, 2884), True, 'import numpy as np\n'), ((3643, 3704), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', 
(['calculated_intensity', 'peak[1]'], {}), '(calculated_intensity, peak[1])\n', (3673, 3704), True, 'import numpy as np\n'), ((4014, 4040), 'nmrtools.nmrmath.dnmr_AB', 'dnmr_AB', (['peak[0]', '*ab_args'], {}), '(peak[0], *ab_args)\n', (4021, 4040), False, 'from nmrtools.nmrmath import two_spin, d2s_func, TwoSinglets, dnmr_AB\n'), ((4276, 4337), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['calculated_intensity', 'peak[1]'], {}), '(calculated_intensity, peak[1])\n', (4306, 4337), True, 'import numpy as np\n'), ((672, 695), 'numpy.abs', 'np.abs', (['(spectrum[0] - x)'], {}), '(spectrum[0] - x)\n', (678, 695), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import os
def plot_kmers() :
    """Plot k-mer abundance histograms for enriched reads vs. trusted genes.

    Reads two "<kmer> <count>" text files from ./data, histograms both
    count distributions over [0, 1000], overlays them as line plots with
    a dashed line at the 5th percentile of the trusted-gene abundances,
    and saves the figure to output/kmerplot.png.
    """
    # Global styling for the figure and tick labels.
    plt.rcParams['figure.figsize'] = 12, 8
    plt.rcParams['xtick.labelsize'] = 14
    plt.rcParams['ytick.labelsize'] = 14
    # Input/output locations (relative to the working directory).
    ip_dir = "data"
    kmers_from_reads = ip_dir + "/kmers_from_reads"
    trusted_DSK_counts_acc_to_fsY = ip_dir + "/trusted_DSK_counts_acc_to_fsY"
    op_dir = "output"
    op_file = op_dir + "/kmerplot.png"
    #print "Generating k-mer plot"
    # if folder "output" doesn't exist, create the folder
    if not os.path.exists(op_dir):
        os.makedirs(op_dir)
    # remove a stale plot from a previous run, if any
    if os.path.isfile(op_file):
        os.remove(op_file)
    # Parse "<kmer> <count>" lines; the second column is the abundance.
    abundance_list_fsY = []
    with open(kmers_from_reads, "r") as f:
        for line in f:
            abundance = float(str(line).strip().split()[1])
            abundance_list_fsY.append(abundance)
    abundance_list_trusted_genes = []
    with open(trusted_DSK_counts_acc_to_fsY, "r") as f:
        for line in f:
            abundance = float(str(line).strip().split()[1])
            abundance_list_trusted_genes.append(abundance)
    sns.set_style("white")
    # Histogram both distributions on a shared set of bins; only the bin
    # counts (n1, n2) are used below -- the bar plots themselves are cleared.
    bins_master = np.linspace(0, 1000, 1000)
    (n1, bins1, patches1) = plt.hist(abundance_list_trusted_genes, bins=bins_master, label='hst')
    (n2, bins2, patches2) = plt.hist(abundance_list_fsY, bins=bins_master, label='hst')
    plt.clf()
    # Re-read the trusted counts, keeping only non-zero abundances, to
    # derive percentile-based axis limits.  NOTE(review): this pass splits
    # on a single space rather than arbitrary whitespace like the loops
    # above -- presumably the file is single-space delimited; confirm.
    hum_gene_kmers_counts = []
    with open(trusted_DSK_counts_acc_to_fsY, "r") as f:
        for line in f:
            abundance = float(str(line).split(' ')[1])
            if abundance > 0.0:
                hum_gene_kmers_counts.append(abundance)
    h = np.array(hum_gene_kmers_counts)
    five_pc = np.percentile(h, 5)
    ninety_five_pc = np.percentile(h, 95)
    # One x position per histogram bin (999 bins come from 1000 bin edges).
    xs = [i for i in range(0, 999, 1)]
    len(xs)  # no-op; leftover debug statement
    sns.set_style("white")
    plt.plot(xs, n2, sns.xkcd_rgb["denim blue"], lw=5, label="K-mers from enriched data")
    plt.plot(xs, n1, sns.xkcd_rgb["pale red"], lw=5, label="Trusted-gene-kmers", )
    plt.legend(loc='upper right', fontsize=24)
    axes = plt.gca()
    # increase size of labels
    plt.tick_params(labelsize=24)
    # add thousands separators to both axes
    axes.get_yaxis().set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x))))
    axes.get_xaxis().set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}".format(int(x))))
    # set limits (x capped at the 95th percentile plus margin)
    y_upper = 100000
    axes.set_xlim([3, ninety_five_pc + 50])
    axes.set_ylim([0, y_upper])
    # dashed vertical line at the 5th percentile of trusted-gene abundance
    plt.plot((five_pc, five_pc), (0, y_upper), sns.xkcd_rgb["black"], lw=4, linestyle='--', label="Trusted-genes")
    plt.xlabel('Abundance', fontsize=24)
    plt.ylabel('Number of kmers', fontsize=24)
    # adding a panel label
    # axes.text(-0.16, 1.03, "B", transform=axes.transAxes,fontsize=32, fontweight='bold', va='top', ha='right')
    # plt.show()
    plt.savefig(op_file, bbox_inches='tight')
| [
"seaborn.set_style",
"os.remove",
"os.makedirs",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"os.path.exists",
"numpy.percentile",
"os.path.isfile",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tic... | [((652, 675), 'os.path.isfile', 'os.path.isfile', (['op_file'], {}), '(op_file)\n', (666, 675), False, 'import os\n'), ((1182, 1204), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (1195, 1204), True, 'import seaborn as sns\n'), ((1224, 1250), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(1000)'], {}), '(0, 1000, 1000)\n', (1235, 1250), True, 'import numpy as np\n'), ((1280, 1349), 'matplotlib.pyplot.hist', 'plt.hist', (['abundance_list_trusted_genes'], {'bins': 'bins_master', 'label': '"""hst"""'}), "(abundance_list_trusted_genes, bins=bins_master, label='hst')\n", (1288, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1379, 1438), 'matplotlib.pyplot.hist', 'plt.hist', (['abundance_list_fsY'], {'bins': 'bins_master', 'label': '"""hst"""'}), "(abundance_list_fsY, bins=bins_master, label='hst')\n", (1387, 1438), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1453), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1451, 1453), True, 'import matplotlib.pyplot as plt\n'), ((1717, 1748), 'numpy.array', 'np.array', (['hum_gene_kmers_counts'], {}), '(hum_gene_kmers_counts)\n', (1725, 1748), True, 'import numpy as np\n'), ((1763, 1782), 'numpy.percentile', 'np.percentile', (['h', '(5)'], {}), '(h, 5)\n', (1776, 1782), True, 'import numpy as np\n'), ((1804, 1824), 'numpy.percentile', 'np.percentile', (['h', '(95)'], {}), '(h, 95)\n', (1817, 1824), True, 'import numpy as np\n'), ((1882, 1904), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (1895, 1904), True, 'import seaborn as sns\n'), ((1910, 2000), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'n2', "sns.xkcd_rgb['denim blue']"], {'lw': '(5)', 'label': '"""K-mers from enriched data"""'}), "(xs, n2, sns.xkcd_rgb['denim blue'], lw=5, label=\n 'K-mers from enriched data')\n", (1918, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2000, 2076), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'n1', 
"sns.xkcd_rgb['pale red']"], {'lw': '(5)', 'label': '"""Trusted-gene-kmers"""'}), "(xs, n1, sns.xkcd_rgb['pale red'], lw=5, label='Trusted-gene-kmers')\n", (2008, 2076), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2126), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '(24)'}), "(loc='upper right', fontsize=24)\n", (2094, 2126), True, 'import matplotlib.pyplot as plt\n'), ((2138, 2147), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2145, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2183, 2212), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(24)'}), '(labelsize=24)\n', (2198, 2212), True, 'import matplotlib.pyplot as plt\n'), ((2566, 2680), 'matplotlib.pyplot.plot', 'plt.plot', (['(five_pc, five_pc)', '(0, y_upper)', "sns.xkcd_rgb['black']"], {'lw': '(4)', 'linestyle': '"""--"""', 'label': '"""Trusted-genes"""'}), "((five_pc, five_pc), (0, y_upper), sns.xkcd_rgb['black'], lw=4,\n linestyle='--', label='Trusted-genes')\n", (2574, 2680), True, 'import matplotlib.pyplot as plt\n'), ((2682, 2718), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Abundance"""'], {'fontsize': '(24)'}), "('Abundance', fontsize=24)\n", (2692, 2718), True, 'import matplotlib.pyplot as plt\n'), ((2723, 2765), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of kmers"""'], {'fontsize': '(24)'}), "('Number of kmers', fontsize=24)\n", (2733, 2765), True, 'import matplotlib.pyplot as plt\n'), ((2930, 2971), 'matplotlib.pyplot.savefig', 'plt.savefig', (['op_file'], {'bbox_inches': '"""tight"""'}), "(op_file, bbox_inches='tight')\n", (2941, 2971), True, 'import matplotlib.pyplot as plt\n'), ((548, 570), 'os.path.exists', 'os.path.exists', (['op_dir'], {}), '(op_dir)\n', (562, 570), False, 'import os\n'), ((580, 599), 'os.makedirs', 'os.makedirs', (['op_dir'], {}), '(op_dir)\n', (591, 599), False, 'import os\n'), ((685, 703), 'os.remove', 'os.remove', (['op_file'], {}), '(op_file)\n', (694, 703), False, 
'import os\n')] |
#!/usr/bin/env python
""" create a figure showing an adjacency matrix in brain space
"""
import networkx as nx
import numpy as N
import matplotlib.pyplot as plt
import nibabel as nib
# Configuration: atlas label file, kept-ROI index file, adjacency matrix, and
# the minimum edge count required for an edge to be drawn.
roilabelfile='/work/01329/poldrack/software_lonestar/atlases/sc_HO_atlas/ROIlabels.txt'
atlasroifile='/scratch/01329/poldrack/openfmri/shared2/scatlas_goodcols.npy'
adjmtxfile='resid_adjcount.txt'
thresh=10
# Parse the ROI label file: tab-delimited "ROI<TAB>x<TAB>y<TAB>z<TAB>name"
# rows; the header row (first field == 'ROI') is skipped.
roicoords={}
roinames={}
f=open(roilabelfile,'r')
for l in f.readlines():
    l_s=l.strip().split('\t')
    if not l_s[0]=='ROI':
        # convert coords to voxel index space
        roicoords[int(l_s[0])]=[int(l_s[1]),int(l_s[2]),int(l_s[3])]
        roinames[int(l_s[0])]=l_s[4]
# ROI indices retained in the adjacency matrix (0-based; +1 maps onto the
# 1-based keys of roicoords).
atlasrois=N.load(atlasroifile)[0]
# Build one 2-D projection of each ROI position per anatomical view.
atlasroipositions_xy={}
atlasroipositions_xz={}
atlasroipositions_yz={}
for r in range(len(atlasrois)):
    atlasroipositions_xy[r]=roicoords[atlasrois[r]+1][0:2]
    atlasroipositions_xz[r]=[roicoords[atlasrois[r]+1][0],roicoords[atlasrois[r]+1][2]]
    atlasroipositions_yz[r]=roicoords[atlasrois[r]+1][1:3]
# create dictionary for positions of each ROI
# create nx graph adj. matrix
adj=N.genfromtxt(adjmtxfile)
# Zero out edges at or below the threshold before building the graph.
adj=adj*(adj>thresh).astype('int')
# NOTE(review): from_numpy_matrix is removed in networkx >= 3.0;
# from_numpy_array is the modern replacement.
G=nx.from_numpy_matrix(adj)
# Shift edge weights to be >= 1 so the colormap range is positive.
# (list - numpy scalar broadcasts, so `weights` becomes an ndarray here.)
weights = [x[2]['weight'] for x in G.edges(data=True)]
weights=weights-N.min(weights)+1
# draw graph: one subplot per anatomical projection (axial/coronal/sagittal)
plt.figure(num=None,figsize=(12,8))
plt.subplot(221)
#plt.imshow(anat_xy)
nx.draw_networkx(G,pos=atlasroipositions_xy,with_labels=False,node_size=10,edge_color=weights,edge_cmap=plt.cm.Reds,width=4)
plt.subplot(222)
#plt.imshow(anat_yz)
nx.draw_networkx(G,pos=atlasroipositions_xz,with_labels=False,node_size=10,edge_color=weights,edge_cmap=plt.cm.Reds,width=4)
plt.subplot(223)
nx.draw_networkx(G,pos=atlasroipositions_yz,with_labels=False,node_size=10,edge_color=weights,edge_cmap=plt.cm.Reds,width=4)
plt.show()
#plt.savefig(',dpi=300)
#plt.show()
| [
"matplotlib.pyplot.subplot",
"numpy.load",
"matplotlib.pyplot.show",
"networkx.from_numpy_matrix",
"numpy.genfromtxt",
"networkx.draw_networkx",
"matplotlib.pyplot.figure",
"numpy.min"
] | [((1111, 1135), 'numpy.genfromtxt', 'N.genfromtxt', (['adjmtxfile'], {}), '(adjmtxfile)\n', (1123, 1135), True, 'import numpy as N\n'), ((1174, 1199), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj'], {}), '(adj)\n', (1194, 1199), True, 'import networkx as nx\n'), ((1303, 1340), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(12, 8)'}), '(num=None, figsize=(12, 8))\n', (1313, 1340), True, 'import matplotlib.pyplot as plt\n'), ((1340, 1356), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (1351, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1378, 1513), 'networkx.draw_networkx', 'nx.draw_networkx', (['G'], {'pos': 'atlasroipositions_xy', 'with_labels': '(False)', 'node_size': '(10)', 'edge_color': 'weights', 'edge_cmap': 'plt.cm.Reds', 'width': '(4)'}), '(G, pos=atlasroipositions_xy, with_labels=False, node_size=\n 10, edge_color=weights, edge_cmap=plt.cm.Reds, width=4)\n', (1394, 1513), True, 'import networkx as nx\n'), ((1504, 1520), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (1515, 1520), True, 'import matplotlib.pyplot as plt\n'), ((1543, 1678), 'networkx.draw_networkx', 'nx.draw_networkx', (['G'], {'pos': 'atlasroipositions_xz', 'with_labels': '(False)', 'node_size': '(10)', 'edge_color': 'weights', 'edge_cmap': 'plt.cm.Reds', 'width': '(4)'}), '(G, pos=atlasroipositions_xz, with_labels=False, node_size=\n 10, edge_color=weights, edge_cmap=plt.cm.Reds, width=4)\n', (1559, 1678), True, 'import networkx as nx\n'), ((1669, 1685), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (1680, 1685), True, 'import matplotlib.pyplot as plt\n'), ((1687, 1822), 'networkx.draw_networkx', 'nx.draw_networkx', (['G'], {'pos': 'atlasroipositions_yz', 'with_labels': '(False)', 'node_size': '(10)', 'edge_color': 'weights', 'edge_cmap': 'plt.cm.Reds', 'width': '(4)'}), '(G, pos=atlasroipositions_yz, with_labels=False, node_size=\n 10, edge_color=weights, 
edge_cmap=plt.cm.Reds, width=4)\n', (1703, 1822), True, 'import networkx as nx\n'), ((1814, 1824), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1822, 1824), True, 'import matplotlib.pyplot as plt\n'), ((688, 708), 'numpy.load', 'N.load', (['atlasroifile'], {}), '(atlasroifile)\n', (694, 708), True, 'import numpy as N\n'), ((1272, 1286), 'numpy.min', 'N.min', (['weights'], {}), '(weights)\n', (1277, 1286), True, 'import numpy as N\n')] |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import utils as tu
from config import cfg
from LeeNet import LeeNet
# Load the training set (True presumably selects the training split -- confirm in utils).
x_train, y_train = tu.load_data(True)
# Allow GPU memory growth instead of grabbing all GPU memory up front.
config = tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True))
# initialize session, model, and TensorBoard writer
sess = tf.Session(config=config)
model = LeeNet(sess, "LeeNet")
writer = tf.summary.FileWriter(cfg.log_dir + '/LeeNet')
writer.add_graph(sess.graph)
sess.run(tf.global_variables_initializer())
# train my model
step = 0  # global step counter across all epochs (used for summary ordering)
costs = []       # per-epoch average cost
train_accu = []  # per-epoch average training accuracy
print('Learning Started!')
for epoch in range(cfg.epoch):
    avg_cost = 0
    avg_accu = 0
    total_batch = int(x_train.shape[0] / cfg.b_size)
    minibatches = tu.random_mini_batches(x_train, y_train, cfg.b_size)
    for i in minibatches:
        (batch_xs, batch_ys) = i
        _, temp_cost, temp_accu, summary = model.train(batch_xs, batch_ys)
        # Accumulate running averages over this epoch's minibatches.
        avg_cost += temp_cost / total_batch
        avg_accu += temp_accu / total_batch
        writer.add_summary(summary, global_step=step)
        step += 1
    costs.append(avg_cost)
    train_accu.append(avg_accu)
    print('Epoch', '%04d' % (epoch + 1),
          ': cost =', '{:.9f}'.format(avg_cost), '| accuracy =', '{:.9f}'.format(avg_accu))
print('Learning Finished!')
# Show plots of costs and train accuracy
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('epochs')
plt.title("LeeNet Model Costs")
plt.show()
plt.plot(np.squeeze(train_accu))
plt.ylabel('train accuracy')
plt.xlabel('epochs')
plt.title("LeeNet Model Train accuracy")
plt.show()
# Save model
saver = tf.train.Saver()
saver.save(sess, cfg.save)
print("Model saved in file: ", cfg.save)
| [
"matplotlib.pyplot.title",
"utils.load_data",
"matplotlib.pyplot.show",
"tensorflow.train.Saver",
"utils.random_mini_batches",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.GPUOptions",
"tensorflow.summary.FileWriter",
"numpy.squeeze",
"matplotlib.pyplot.ylabel",
... | [((172, 190), 'utils.load_data', 'tu.load_data', (['(True)'], {}), '(True)\n', (184, 190), True, 'import utils as tu\n'), ((334, 359), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (344, 359), True, 'import tensorflow as tf\n'), ((369, 391), 'LeeNet.LeeNet', 'LeeNet', (['sess', '"""LeeNet"""'], {}), "(sess, 'LeeNet')\n", (375, 391), False, 'from LeeNet import LeeNet\n'), ((402, 448), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(cfg.log_dir + '/LeeNet')"], {}), "(cfg.log_dir + '/LeeNet')\n", (423, 448), True, 'import tensorflow as tf\n'), ((1421, 1439), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cost"""'], {}), "('cost')\n", (1431, 1439), True, 'import matplotlib.pyplot as plt\n'), ((1441, 1461), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (1451, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1463, 1494), 'matplotlib.pyplot.title', 'plt.title', (['"""LeeNet Model Costs"""'], {}), "('LeeNet Model Costs')\n", (1472, 1494), True, 'import matplotlib.pyplot as plt\n'), ((1496, 1506), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1504, 1506), True, 'import matplotlib.pyplot as plt\n'), ((1544, 1572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""train accuracy"""'], {}), "('train accuracy')\n", (1554, 1572), True, 'import matplotlib.pyplot as plt\n'), ((1574, 1594), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (1584, 1594), True, 'import matplotlib.pyplot as plt\n'), ((1596, 1636), 'matplotlib.pyplot.title', 'plt.title', (['"""LeeNet Model Train accuracy"""'], {}), "('LeeNet Model Train accuracy')\n", (1605, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1648), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1646, 1648), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1692), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1690, 1692), True, 'import tensorflow as tf\n'), 
((489, 522), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (520, 522), True, 'import tensorflow as tf\n'), ((758, 810), 'utils.random_mini_batches', 'tu.random_mini_batches', (['x_train', 'y_train', 'cfg.b_size'], {}), '(x_train, y_train, cfg.b_size)\n', (780, 810), True, 'import utils as tu\n'), ((1401, 1418), 'numpy.squeeze', 'np.squeeze', (['costs'], {}), '(costs)\n', (1411, 1418), True, 'import numpy as np\n'), ((1519, 1541), 'numpy.squeeze', 'np.squeeze', (['train_accu'], {}), '(train_accu)\n', (1529, 1541), True, 'import numpy as np\n'), ((274, 306), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (287, 306), True, 'import tensorflow as tf\n')] |
"""@package reduced_cg
Reduced conjugate gradient solver for the trust region problem with linear constraints.
"""
from functools import reduce

import numpy as np
import numpy.linalg as la

from .model_minimizer import trust_region_intersect, model_minimizer
def reduced_cg(g, c, a, b, delta):
""" Solves a quadratic problem subjected to linear and trust-region constraints.
:param np.array g: (n, n) Matrix in quadratic problem.
:param np.array c: (n, 1) Vector in quadratic problem.
:param np.array a: (m, n) Linear coefficients of x in equality constraints, m constraints are specified, m < n.
:param np.array b: (m, 1) Vector of constants in equality constraints.
:param float delta: Trust-region radius.
:return np.array: (n, 1) Trajectory of x that minimizes the quadratic problem.
This function is based on Algorithm 16.2 from [1], pg. 461. An additional trust-region constraint is added to
the algorithm based on the 2-norm of the x_z vector. The original quadratic problem to solve is
minimize x^T . c + 1/2 x^T . G . x
(P1) x
subject to A . x = b,
||x||_2 <= Delta
The equivalent reduced problem solved by this function is
minimize x^T . c_z + 1/2 x_z^T . Z^T . G . Z . x_z
(P2) x_z
subject to ||x||_2 <= Delta
For the trust-region SQP optimization problem, the parameters are defined for the k'th increment in (P1) as:
- The parameters are defined as follows: [reduced_cg()] = [(P1)] = [SQP problem]
- g = G = hess_xx[L(x_k)]
- c = c = grad_x[f(x_k)]
- a = A = jacobian[g(x_k)]
- b = b = -g*(x_k) <-- notice the minus sign
where L is the Lagrangian, f is the objective function, and g* are the equality constraints formed from inequality
constraints using slack variables. The transformation from (P1) -> (P2) is handled by this function internally. An
additional guard is added when negative curvature is encountered.
If the constraints A.x == b and ||x|| < Delta are infeasible, then a relaxation is calculated (Byrd-Omojokun
approach). The relaxation is calculated by solving a trust-region subproblem nearly exactly using the
model-minimizer method.
References:
[1] <NAME> (2006) "Numerical optimization"
[2] Gould et al. (2001) "On the solution of equality constrained quadratic programming problems arising in
optimization"
[3] Bierlaire (2015) "Optimization: Principles and Algorithms"
[4] Conn et al. (2000) "Trust region methods"
"""
tol = np.sqrt(np.finfo(float).eps)
radius_reduction_factor = 0.8 # necessary to allow some "slack" in the constraint to reduce the objective function
m, n = np.shape(a)
max_iter = n - m + 1
# Calculate the orthonormal range and null-space of A^T
q, _ = la.qr(a.transpose(), mode='complete')
y = q[:, :m] # basis for the range of A^T
z = q[:, m:] # basis for the null-space of A^T
# Check that the x_y component of the solution is within the trust region
x_y = la.solve(np.matmul(a, y), b) # x_y satisfies the constraint A.x == b
x_sol_prev = np.matmul(y, x_y)
if la.norm(x_sol_prev, 2) > delta * radius_reduction_factor:
# The constraints are not feasible with the trust-region, so calculate a relaxation and continue
# Choose r = a.v - b => then we solve min_x x.g.x + 1/2 x.c ; s.t a.x == r --> replace b with r
print ('Applying a relaxation to the constraint.')
v = solve_normal_step(a, -b, radius_reduction_factor * delta)
r = np.matmul(a, v) # relaxation
x_y = la.solve(np.matmul(a, y), r)
x_sol_prev = np.matmul(y, x_y)
if la.norm(x_sol_prev, 2) > delta:
# Still not feasible, something went wrong
raise ValueError('Trust-region is incompatible with the constraints')
# Initialize values for the algorithm
h = np.diag(np.abs(np.diag(g))) # try Jacobi preconditioner
h[h < 1.e-10 * np.max(h)] = 1.
w_zz = np.matmul(z.transpose(), np.matmul(h, z))
w_zz_inv = la.inv(w_zz)
c_z = reduce(np.dot, [z.transpose(), g, y, x_y]) + np.matmul(z.transpose(), c)
x_z = np.zeros((n - m, 1)) # necessary to start at 0 to find the trust-region intersection, see ref. [2]
r_z = reduce(np.dot, [z.transpose(), g, z, x_z]) + c_z
g_z = np.matmul(w_zz_inv, r_z)
d_z = -1.0 * g_z
convergence_criteria = float(np.abs(np.dot(r_z.transpose(), np.dot(w_zz_inv, r_z))))
iteration = 0
while convergence_criteria > tol and iteration < max_iter:
# Calculate the step length
alpha = float(np.dot(r_z.transpose(), g_z) / reduce(np.dot, [d_z.transpose(), z.transpose(), g, z, d_z]))
# Test for negative curvature
if reduce(np.dot, [d_z.transpose(), z.transpose(), g, z, d_z]) < 0.:
# Go to the boundary of the trust-region
# For method, see ref. [3] pg. 298
step_factor = trust_region_intersect(x_sol_prev, np.matmul(z, alpha * d_z), delta)
x_z = x_z + step_factor * alpha * d_z
break
# Test the trust-region constraint
x_sol_trial = x_sol_prev + np.matmul(z, alpha * d_z)
if la.norm(x_sol_trial, 2) >= delta:
# We (miraculously) hit the boundary of the trust-region
if la.norm(x_sol_trial, 2) == delta:
x_z = x_z + alpha * d_z
break
else:
# Find the intersection with the trust-region between the previous point and the trial point
step_factor = trust_region_intersect(x_sol_prev, np.matmul(z, alpha * d_z), delta)
x_z = x_z + step_factor * alpha * d_z
break
# OK, calculate the next CG iteration
x_z = x_z + alpha * d_z
x_sol_prev = x_sol_trial
r_z_p = r_z + alpha * reduce(np.dot, [z.transpose(), g, z, d_z])
g_z_p = np.matmul(w_zz_inv, r_z_p)
beta = np.dot(r_z_p.transpose(), g_z_p) / np.dot(r_z.transpose(), g_z)
d_z = -1. * g_z_p + beta * d_z
g_z = g_z_p
r_z = r_z_p
convergence_criteria = float(np.abs(np.dot(r_z.transpose(), np.dot(w_zz_inv, r_z))))
iteration = iteration + 1
# Get the solution point, recalculate to minimize any round-off errors
# Total step considers the x_y step to satisfy the constraints, and the x_z step to minimize the obj. fun
x_sol = np.matmul(y, x_y) + np.matmul(z, x_z)
# Calculate the Lagrange multipliers, see [1] pg. 538
lam_sol = la.solve(np.matmul(a, y).transpose(), np.matmul(y.transpose(), c + np.matmul(g, x_sol)))
return [x_sol, lam_sol]
def solve_normal_step(a, c, radius):
    """ Returns the solution to the normal subproblem.
    :param np.array a: (m, n) Jacobian of the constraint function.
    :param np.array c: (m, 1) Constraint function values.
    :param float radius: Trust-region radius.
    :return np.array : (n, 1) Solution to the normal subproblem.
    The normal subproblem is defined as
    minimize ||A_k . v + c_k||_2^2
       v
    subject to ||v||_2 <= Delta
    where A_k is the Jacobian of the constraints, c_k is the constraint function, and Delta is the trust-region
    radius. The model minimizer method is used to solve this trust-region problem. See [4] pg. 547 for details.
    """
    # Expanding ||A v + c||^2 gives the quadratic model with Hessian A^T A
    # and gradient A^T c; delegate the trust-region solve to model_minimizer.
    a_t = a.transpose()
    hessian = np.matmul(a_t, a)
    gradient = np.matmul(a_t, c)
    return model_minimizer(hessian, gradient, radius)
| [
"numpy.zeros",
"numpy.shape",
"numpy.finfo",
"numpy.max",
"numpy.linalg.norm",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.dot",
"numpy.diag"
] | [((2741, 2752), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (2749, 2752), True, 'import numpy as np\n'), ((3163, 3180), 'numpy.matmul', 'np.matmul', (['y', 'x_y'], {}), '(y, x_y)\n', (3172, 3180), True, 'import numpy as np\n'), ((4099, 4111), 'numpy.linalg.inv', 'la.inv', (['w_zz'], {}), '(w_zz)\n', (4105, 4111), True, 'import numpy.linalg as la\n'), ((4205, 4225), 'numpy.zeros', 'np.zeros', (['(n - m, 1)'], {}), '((n - m, 1))\n', (4213, 4225), True, 'import numpy as np\n'), ((4374, 4398), 'numpy.matmul', 'np.matmul', (['w_zz_inv', 'r_z'], {}), '(w_zz_inv, r_z)\n', (4383, 4398), True, 'import numpy as np\n'), ((3085, 3100), 'numpy.matmul', 'np.matmul', (['a', 'y'], {}), '(a, y)\n', (3094, 3100), True, 'import numpy as np\n'), ((3188, 3210), 'numpy.linalg.norm', 'la.norm', (['x_sol_prev', '(2)'], {}), '(x_sol_prev, 2)\n', (3195, 3210), True, 'import numpy.linalg as la\n'), ((3596, 3611), 'numpy.matmul', 'np.matmul', (['a', 'v'], {}), '(a, v)\n', (3605, 3611), True, 'import numpy as np\n'), ((3690, 3707), 'numpy.matmul', 'np.matmul', (['y', 'x_y'], {}), '(y, x_y)\n', (3699, 3707), True, 'import numpy as np\n'), ((4067, 4082), 'numpy.matmul', 'np.matmul', (['h', 'z'], {}), '(h, z)\n', (4076, 4082), True, 'import numpy as np\n'), ((5953, 5979), 'numpy.matmul', 'np.matmul', (['w_zz_inv', 'r_z_p'], {}), '(w_zz_inv, r_z_p)\n', (5962, 5979), True, 'import numpy as np\n'), ((6464, 6481), 'numpy.matmul', 'np.matmul', (['y', 'x_y'], {}), '(y, x_y)\n', (6473, 6481), True, 'import numpy as np\n'), ((6484, 6501), 'numpy.matmul', 'np.matmul', (['z', 'x_z'], {}), '(z, x_z)\n', (6493, 6501), True, 'import numpy as np\n'), ((2589, 2604), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2597, 2604), True, 'import numpy as np\n'), ((3649, 3664), 'numpy.matmul', 'np.matmul', (['a', 'y'], {}), '(a, y)\n', (3658, 3664), True, 'import numpy as np\n'), ((3719, 3741), 'numpy.linalg.norm', 'la.norm', (['x_sol_prev', '(2)'], {}), '(x_sol_prev, 2)\n', (3726, 3741), True, 
'import numpy.linalg as la\n'), ((3954, 3964), 'numpy.diag', 'np.diag', (['g'], {}), '(g)\n', (3961, 3964), True, 'import numpy as np\n'), ((5199, 5224), 'numpy.matmul', 'np.matmul', (['z', '(alpha * d_z)'], {}), '(z, alpha * d_z)\n', (5208, 5224), True, 'import numpy as np\n'), ((5236, 5259), 'numpy.linalg.norm', 'la.norm', (['x_sol_trial', '(2)'], {}), '(x_sol_trial, 2)\n', (5243, 5259), True, 'import numpy.linalg as la\n'), ((4015, 4024), 'numpy.max', 'np.max', (['h'], {}), '(h)\n', (4021, 4024), True, 'import numpy as np\n'), ((4485, 4506), 'numpy.dot', 'np.dot', (['w_zz_inv', 'r_z'], {}), '(w_zz_inv, r_z)\n', (4491, 4506), True, 'import numpy as np\n'), ((5018, 5043), 'numpy.matmul', 'np.matmul', (['z', '(alpha * d_z)'], {}), '(z, alpha * d_z)\n', (5027, 5043), True, 'import numpy as np\n'), ((5354, 5377), 'numpy.linalg.norm', 'la.norm', (['x_sol_trial', '(2)'], {}), '(x_sol_trial, 2)\n', (5361, 5377), True, 'import numpy.linalg as la\n'), ((6584, 6599), 'numpy.matmul', 'np.matmul', (['a', 'y'], {}), '(a, y)\n', (6593, 6599), True, 'import numpy as np\n'), ((6642, 6661), 'numpy.matmul', 'np.matmul', (['g', 'x_sol'], {}), '(g, x_sol)\n', (6651, 6661), True, 'import numpy as np\n'), ((5642, 5667), 'numpy.matmul', 'np.matmul', (['z', '(alpha * d_z)'], {}), '(z, alpha * d_z)\n', (5651, 5667), True, 'import numpy as np\n'), ((6207, 6228), 'numpy.dot', 'np.dot', (['w_zz_inv', 'r_z'], {}), '(w_zz_inv, r_z)\n', (6213, 6228), True, 'import numpy as np\n')] |
"""
Defines the Instrument class
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import numpy as _np
from pygsti.modelmembers import modelmember as _mm
from pygsti.modelmembers import operations as _op
from pygsti.modelmembers import states as _state
from pygsti.evotypes import Evotype as _Evotype
from pygsti.baseobjs import statespace as _statespace
from pygsti.tools import matrixtools as _mt
from pygsti.tools import slicetools as _slct
from pygsti.baseobjs.label import Label as _Label
from pygsti.baseobjs.statespace import StateSpace as _StateSpace
class Instrument(_mm.ModelMember, _collections.OrderedDict):
    """
    A generalized quantum instrument.
    Meant to correspond to a quantum instrument in theory, this class
    generalizes that notion slightly to include a collection of gates that may
    or may not have all of the properties associated by a mathematical quantum
    instrument.
    Parameters
    ----------
    member_ops : dict of LinearOperator objects
        A dict (or list of key,value pairs) of the gates.
    evotype : Evotype or str, optional
        The evolution type. If `None`, the evotype is inferred
        from the first instrument member. If `len(member_ops) == 0` in this case,
        an error is raised.
    state_space : StateSpace, optional
        The state space for this POVM. If `None`, the space is inferred
        from the first instrument member. If `len(member_ops) == 0` in this case,
        an error is raised.
    items : list or dict, optional
        Initial values. This should only be used internally in de-serialization.
    """
    def __init__(self, member_ops, evotype=None, state_space=None, items=[]):
        # NOTE(review): the mutable default `items=[]` is safe here only because
        # `items` is rebound (never mutated in place) before any append below.
        self._readonly = False # until init is done
        if len(items) > 0:
            assert(member_ops is None), "`items` was given when op_matrices != None"
        if member_ops is not None:
            if isinstance(member_ops, dict):
                member_list = [(k, v) for k, v in member_ops.items()] # gives definite ordering
            elif isinstance(member_ops, list):
                member_list = member_ops # assume it's is already an ordered (key,value) list
            else:
                raise ValueError("Invalid `member_ops` arg of type %s" % type(member_ops))
            #Special case when we're given matrices: infer a default state space and evotype:
            if len(member_list) > 0 and not isinstance(member_list[0][1], _op.LinearOperator):
                if state_space is None:
                    state_space = _statespace.default_space_for_dim(member_list[0][1].shape[0])
                if evotype is None:
                    evotype = _Evotype.cast('default')
                # Wrap raw matrices into LinearOperator objects.
                member_list = [(k, v if isinstance(v, _op.LinearOperator) else
                                _op.FullArbitraryOp(v, evotype, state_space)) for k, v in member_list]
            assert(len(member_list) > 0 or state_space is not None), \
                "Must specify `state_space` when there are no instrument members!"
            assert(len(member_list) > 0 or evotype is not None), \
                "Must specify `evotype` when there are no instrument members!"
            # Fall back to the first member's evotype / state space when not given.
            evotype = _Evotype.cast(evotype) if (evotype is not None) else member_list[0][1].evotype
            state_space = member_list[0][1].state_space if (state_space is None) \
                else _statespace.StateSpace.cast(state_space)
            items = []
            for k, member in member_list:
                assert(evotype == member.evotype), \
                    "All instrument members must have the same evolution type"
                assert(state_space.is_compatible_with(member.state_space)), \
                    "All instrument members must have compatible state spaces!"
                items.append((k, member))
        else:
            if len(items) > 0: # HACK so that OrderedDict.copy() works, which creates a new object with only items...
                if state_space is None: state_space = items[0][1].state_space
                if evotype is None: evotype = items[0][1].evotype
            assert(state_space is not None), "`state_space` cannot be `None` when there are no members!"
            assert(evotype is not None), "`evotype` cannot be `None` when there are no members!"
        _collections.OrderedDict.__init__(self, items)
        _mm.ModelMember.__init__(self, state_space, evotype)
        self.init_gpindices()
        # Freeze the dict contents; __setitem__ refuses changes from here on.
        self._readonly = True
    def submembers(self):
        """
        Get the ModelMember-derived objects contained in this one.
        Returns
        -------
        list
        """
        return list(self.values())
    def to_memoized_dict(self, mmg_memo):
        """Create a serializable dict with references to other objects in the memo.
        Parameters
        ----------
        mmg_memo: dict
            Memo dict from a ModelMemberGraph, i.e. keys are object ids and values
            are ModelMemberGraphNodes (which contain the serialize_id). This is NOT
            the same as other memos in ModelMember (e.g. copy, allocate_gpindices, etc.).
        Returns
        -------
        mm_dict: dict
            A dict representation of this ModelMember ready for serialization
            This must have at least the following fields:
            module, class, submembers, params, state_space, evotype
            Additional fields may be added by derived classes.
        """
        mm_dict = super().to_memoized_dict(mmg_memo)
        mm_dict['member_labels'] = list(self.keys()) # labels of the submember effects
        return mm_dict
    @classmethod
    def _from_memoized_dict(cls, mm_dict, serial_memo):
        """Reconstruct an Instrument from its memoized-dict representation."""
        state_space = _StateSpace.from_nice_serialization(mm_dict['state_space'])
        # Pair each stored member label with its deserialized submember object.
        members = [(lbl, serial_memo[subm_serial_id])
                   for lbl, subm_serial_id in zip(mm_dict['member_labels'], mm_dict['submembers'])]
        return cls(members, mm_dict['evotype'], state_space)
    def _is_similar(self, other, rtol, atol):
        """ Returns True if `other` model member (which it guaranteed to be the same type as self) has
            the same local structure, i.e., not considering parameter values or submembers """
        return list(self.keys()) == list(other.keys())
    def __setitem__(self, key, value):
        # Contents are immutable after __init__ (see self._readonly).
        if self._readonly: raise ValueError("Cannot alter Instrument elements")
        else: return _collections.OrderedDict.__setitem__(self, key, value)
    def __reduce__(self):
        """ Needed for OrderedDict-derived classes (to set dict items) """
        #need to *not* pickle parent, as __reduce__ bypasses ModelMember.__getstate__
        dict_to_pickle = self.__dict__.copy()
        dict_to_pickle['_parent'] = None
        #Note: must *copy* elements for pickling/copying
        return (Instrument, (None, self.evotype, self.state_space, [(key, gate.copy()) for key, gate in self.items()]),
                dict_to_pickle)
    def __pygsti_reduce__(self):
        """Delegate to __reduce__."""
        return self.__reduce__()
    def simplify_operations(self, prefix=""):
        """
        Creates a dictionary of simplified instrument operations.
        Returns a dictionary of operations that belong to the Instrument's parent
        `Model` - that is, whose `gpindices` are set to all or a subset of
        this instruments's gpindices. These are used internally within
        computations involving the parent `Model`.
        Parameters
        ----------
        prefix : str
            A string, usually identitying this instrument, which may be used
            to prefix the simplified gate keys.
        Returns
        -------
        OrderedDict of Gates
        """
        #Create a "simplified" (Model-referencing) set of element gates
        simplified = _collections.OrderedDict()
        if isinstance(prefix, _Label): # Deal with case when prefix isn't just a string
            for k, g in self.items():
                # Build a new Label keeping the prefix's state-space labels (sslbls).
                simplified[_Label(prefix.name + "_" + k, prefix.sslbls)] = g
        else:
            if prefix: prefix += "_"
            for k, g in self.items():
                simplified[prefix + k] = g
        return simplified
    @property
    def parameter_labels(self):
        """
        An array of labels (usually strings) describing this model member's parameters.
        """
        # Collect, for each local parameter index, the labels contributed by
        # every member whose relative parameter indices map onto that index.
        plabels_per_local_index = _collections.defaultdict(list)
        for operation, factorgate_local_inds in zip(self.submembers(), self._submember_rpindices):
            for i, plbl in zip(_slct.to_array(factorgate_local_inds), operation.parameter_labels):
                plabels_per_local_index[i].append(plbl)
        vl = _np.empty(self.num_params, dtype=object)
        for i in range(self.num_params):
            vl[i] = ', '.join(plabels_per_local_index[i])
        return vl
    @property
    def num_elements(self):
        """
        Return the number of total gate elements in this instrument.
        This is in general different from the number of *parameters*,
        which are the number of free variables used to generate all of
        the matrix *elements*.
        Returns
        -------
        int
        """
        return sum([g.size for g in self.values()])
    @property
    def num_params(self):
        """
        Get the number of independent parameters which specify this Instrument.
        Returns
        -------
        int
            the number of independent parameters.
        """
        return len(self.gpindices_as_array())
    def to_vector(self):
        """
        Extract a vector of the underlying gate parameters from this Instrument.
        Returns
        -------
        numpy array
            a 1D numpy array with length == num_params().
        """
        assert(self.gpindices is not None), "Must set an Instrument's .gpindices before calling to_vector"
        v = _np.empty(self.num_params, 'd')
        # Each member scatters its parameters into the slots given by its
        # relative (local) parameter indices.
        for operation, factor_local_inds in zip(self.values(), self._submember_rpindices):
            v[factor_local_inds] = operation.to_vector()
        return v
    def from_vector(self, v, close=False, dirty_value=True):
        """
        Initialize the Instrument using a vector of its parameters.
        Parameters
        ----------
        v : numpy array
            The 1D vector of gate parameters.  Length
            must == num_params().
        close : bool, optional
            Whether `v` is close to this Instrument's current
            set of parameters.  Under some circumstances, when this
            is true this call can be completed more quickly.
        dirty_value : bool, optional
            The value to set this object's "dirty flag" to before exiting this
            call.  This is passed as an argument so it can be updated *recursively*.
            Leave this set to `True` unless you know what you're doing.
        Returns
        -------
        None
        """
        assert(self.gpindices is not None), "Must set an Instrument's .gpindices before calling from_vector"
        for operation, factor_local_inds in zip(self.values(), self._submember_rpindices):
            operation.from_vector(v[factor_local_inds], close, dirty_value)
        self.dirty = dirty_value
    def transform_inplace(self, s):
        """
        Update each Instrument element matrix `O` with `inv(s) * O * s`.
        Parameters
        ----------
        s : GaugeGroupElement
            A gauge group element which specifies the "s" matrix
            (and it's inverse) used in the above similarity transform.
        Returns
        -------
        None
        """
        #Note: since each Mi is a linear function of MT and the Di, we can just
        # transform the MT and Di (self.param_ops) and re-init the elements.
        for gate in self.values():
            gate.transform_inplace(s)
        self.dirty = True
    def depolarize(self, amount):
        """
        Depolarize this Instrument by the given `amount`.
        Parameters
        ----------
        amount : float or tuple
            The amount to depolarize by.  If a tuple, it must have length
            equal to one less than the dimension of the gate. All but the
            first element of each spam vector (often corresponding to the
            identity element) are multiplied by `amount` (if a float) or
            the corresponding `amount[i]` (if a tuple).
        Returns
        -------
        None
        """
        #Note: since each Mi is a linear function of MT and the Di, we can just
        # depolarize the MT and Di (self.param_ops) and re-init the elements.
        for gate in self.values():
            gate.depolarize(amount)
        self.dirty = True
    def rotate(self, amount, mx_basis='gm'):
        """
        Rotate this instrument by the given `amount`.
        Parameters
        ----------
        amount : tuple of floats, optional
            Specifies the rotation "coefficients" along each of the non-identity
            Pauli-product axes.  The gate's matrix `G` is composed with a
            rotation operation `R`  (so `G` -> `dot(R, G)` ) where `R` is the
            unitary superoperator corresponding to the unitary operator
            `U = exp( sum_k( i * rotate[k] / 2.0 * Pauli_k ) )`.  Here `Pauli_k`
            ranges over all of the non-identity un-normalized Pauli operators.
        mx_basis : {'std', 'gm', 'pp', 'qt'} or Basis object
            The source and destination basis, respectively.  Allowed
            values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
            and Qutrit (qt) (or a custom basis object).
        Returns
        -------
        None
        """
        #Note: since each Mi is a linear function of MT and the Di, we can just
        # rotate the MT and Di (self.param_ops) and re-init the elements.
        for gate in self.values():
            gate.rotate(amount, mx_basis)
        self.dirty = True
    def acton(self, state):
        """
        Act with this instrument upon `state`
        Parameters
        ----------
        state : State
            The state to act on
        Returns
        -------
        OrderedDict
            A dictionary whose keys are the outcome labels (strings)
            and whose values are `(prob, normalized_state)` tuples
            giving the probability of seeing the given outcome and
            the resulting state that would be obtained if and when
            that outcome is observed.
        """
        assert(state._evotype == self._evotype), "Evolution type mismatch: %s != %s" % (self._evotype, state._evotype)
        staterep = state._rep
        outcome_probs_and_states = _collections.OrderedDict()
        for lbl, element in self.items():
            output_rep = element._rep.acton(staterep)
            output_unnormalized_state = output_rep.to_dense()
            # NOTE(review): the [0]th dense element scaled by dim**0.25 is taken
            # as the outcome probability -- presumably the identity-component
            # convention of this superket representation; confirm.
            prob = output_unnormalized_state[0] * state.dim**0.25
            output_normalized_state = output_unnormalized_state / prob # so [0]th == 1/state_dim**0.25
            outcome_probs_and_states[lbl] = (prob, _state.StaticState(output_normalized_state, self.evotype,
                                                                      self.state_space))
        return outcome_probs_and_states
    def __str__(self):
        s = "Instrument with elements:\n"
        for lbl, element in self.items():
            s += "%s:\n%s\n" % (lbl, _mt.mx_to_string(element.to_dense(), width=4, prec=2))
        return s
| [
"collections.OrderedDict.__init__",
"pygsti.modelmembers.modelmember.ModelMember.__init__",
"pygsti.baseobjs.label.Label",
"pygsti.baseobjs.statespace.StateSpace.from_nice_serialization",
"numpy.empty",
"pygsti.tools.slicetools.to_array",
"pygsti.modelmembers.states.StaticState",
"collections.OrderedD... | [((4994, 5040), 'collections.OrderedDict.__init__', '_collections.OrderedDict.__init__', (['self', 'items'], {}), '(self, items)\n', (5027, 5040), True, 'import collections as _collections\n'), ((5049, 5101), 'pygsti.modelmembers.modelmember.ModelMember.__init__', '_mm.ModelMember.__init__', (['self', 'state_space', 'evotype'], {}), '(self, state_space, evotype)\n', (5073, 5101), True, 'from pygsti.modelmembers import modelmember as _mm\n'), ((6407, 6466), 'pygsti.baseobjs.statespace.StateSpace.from_nice_serialization', '_StateSpace.from_nice_serialization', (["mm_dict['state_space']"], {}), "(mm_dict['state_space'])\n", (6442, 6466), True, 'from pygsti.baseobjs.statespace import StateSpace as _StateSpace\n'), ((8488, 8514), 'collections.OrderedDict', '_collections.OrderedDict', ([], {}), '()\n', (8512, 8514), True, 'import collections as _collections\n'), ((9070, 9100), 'collections.defaultdict', '_collections.defaultdict', (['list'], {}), '(list)\n', (9094, 9100), True, 'import collections as _collections\n'), ((9369, 9409), 'numpy.empty', '_np.empty', (['self.num_params'], {'dtype': 'object'}), '(self.num_params, dtype=object)\n', (9378, 9409), True, 'import numpy as _np\n'), ((10580, 10611), 'numpy.empty', '_np.empty', (['self.num_params', '"""d"""'], {}), "(self.num_params, 'd')\n", (10589, 10611), True, 'import numpy as _np\n'), ((15392, 15418), 'collections.OrderedDict', '_collections.OrderedDict', ([], {}), '()\n', (15416, 15418), True, 'import collections as _collections\n'), ((7123, 7177), 'collections.OrderedDict.__setitem__', '_collections.OrderedDict.__setitem__', (['self', 'key', 'value'], {}), '(self, key, value)\n', (7159, 7177), True, 'import collections as _collections\n'), ((3883, 3905), 'pygsti.evotypes.Evotype.cast', '_Evotype.cast', (['evotype'], {}), '(evotype)\n', (3896, 3905), True, 'from pygsti.evotypes import Evotype as _Evotype\n'), ((4066, 4106), 'pygsti.baseobjs.statespace.StateSpace.cast', 
'_statespace.StateSpace.cast', (['state_space'], {}), '(state_space)\n', (4093, 4106), True, 'from pygsti.baseobjs import statespace as _statespace\n'), ((9231, 9268), 'pygsti.tools.slicetools.to_array', '_slct.to_array', (['factorgate_local_inds'], {}), '(factorgate_local_inds)\n', (9245, 9268), True, 'from pygsti.tools import slicetools as _slct\n'), ((15798, 15873), 'pygsti.modelmembers.states.StaticState', '_state.StaticState', (['output_normalized_state', 'self.evotype', 'self.state_space'], {}), '(output_normalized_state, self.evotype, self.state_space)\n', (15816, 15873), True, 'from pygsti.modelmembers import states as _state\n'), ((3225, 3286), 'pygsti.baseobjs.statespace.default_space_for_dim', '_statespace.default_space_for_dim', (['member_list[0][1].shape[0]'], {}), '(member_list[0][1].shape[0])\n', (3258, 3286), True, 'from pygsti.baseobjs import statespace as _statespace\n'), ((3353, 3377), 'pygsti.evotypes.Evotype.cast', '_Evotype.cast', (['"""default"""'], {}), "('default')\n", (3366, 3377), True, 'from pygsti.evotypes import Evotype as _Evotype\n'), ((8669, 8713), 'pygsti.baseobjs.label.Label', '_Label', (["(prefix.name + '_' + k)", 'prefix.sslbls'], {}), "(prefix.name + '_' + k, prefix.sslbls)\n", (8675, 8713), True, 'from pygsti.baseobjs.label import Label as _Label\n'), ((3489, 3533), 'pygsti.modelmembers.operations.FullArbitraryOp', '_op.FullArbitraryOp', (['v', 'evotype', 'state_space'], {}), '(v, evotype, state_space)\n', (3508, 3533), True, 'from pygsti.modelmembers import operations as _op\n')] |
# coding: utf-8
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from scipy import optimize
import math
import logging
import sys
import time
import numpy as np
import pylab as pl
from pykalman import UnscentedKalmanFilter
from numpy.linalg import cholesky
from arp.config import Config
IMAGE_HEI = Config.IMAGE_HEI
lane_wid = Config.lane_wid
#state (x y v theta w)
# x(t+Δt) = (v/ω)·sin(ω·Δt + θ) − (v/ω)·sin(θ) + x(t)
# y(t+Δt) = −(v/ω)·cos(ω·Δt + θ) + (v/ω)·cos(θ) + y(t)
# θ is the heading angle measured from the x axis
class Fusion(object):
    """Unscented-Kalman-filter fusion over the state (x, y, v, theta, w).

    The process model is a constant-turn-rate-and-velocity (CTRV) motion
    model; `x` is corrected against the lane-parabola parameters in
    `transition_function`, so it presumably represents the lateral offset
    within the lane -- TODO confirm against the caller.
    """
    def __init__(self, x, v, w):
        # Fixed RNG seed makes the filter's noise realizations reproducible.
        self.random_state = np.random.RandomState(0)
        # Process-noise covariance for (x, y, v, theta, w).
        self.transition_covariance = np.array([[0.5, 0, 0, 0, 0],\
                                                 [0, 1, 0, 0, 0],\
                                                 [0, 0, 0.1, 0, 0],\
                                                 [0, 0, 0, 0.001, 0],\
                                                 [0, 0, 0, 0, 0.001],\
                                                 ])
        # Measurement-noise covariance (observations share the state coordinates).
        self.observation_covariance = np.array([[0.5, 0, 0, 0, 0],\
                                                  [0, 1, 0, 0, 0],\
                                                  [0, 0, 0.5, 0, 0],\
                                                  [0, 0, 0, 0.001, 0],\
                                                  [0, 0, 0, 0, 0.001],\
                                                  ])
        # Heading is pinned to +pi/2 (motion along +y); y starts at 0.1.
        self.initial_state_mean = [x, 0.1, v, np.pi / 2, w]
        # self.initial_state_mean = [0, 0, 20, 0, np.pi / 180]
        self.transition_state = self.initial_state_mean
        self.obs = self.initial_state_mean
        # Lane parabola coefficients [a, b, c] from the previous frame;
        # c = 1.75 is presumably half a lane width in meters -- confirm.
        self.pre_parabola_param = [0, 0, 1.75]
        self.initial_state_covariance = np.array([[0.5, 0, 0, 0, 0],\
                                                    [0, 0.02, 0, 0, 0],\
                                                    [0, 0, 0.1, 0, 0],\
                                                    [0, 0, 0, 0.001, 0],\
                                                    [0, 0, 0, 0, 0.001],\
                                                    ])
        # Integration time step (seconds); overwritten by update_step().
        self.T = 0.5
        # Current posterior as a [mean, covariance] pair.
        self.estimate_state = [self.initial_state_mean, self.initial_state_covariance]
        self.kf = UnscentedKalmanFilter(
            self.transition_function, self.observation_function,
            self.transition_covariance, self.observation_covariance,
            self.initial_state_mean, self.initial_state_covariance,
            random_state=self.random_state
        )
        self.timestamp = time.time()
    def transition_function(self, state, noise):
        """CTRV process model: propagate `state` over self.T seconds, correct x
        against the previous frame's lane parabola, and add `noise`."""
        t = self.T
        if state[4] == 0:
            # Zero yaw rate: straight-line motion.
            a = state[2] * np.cos(state[3]) + state[0]
            b = state[2] * np.sin(state[3]) + state[1]
            c = state[2]
            # Heading is clamped to +pi/2 (original propagation kept in comment).
            d = np.pi / 2#state[4] * t + state[3]
            e = state[4]
        else:
            # Turning: motion on a circle of radius v / w.
            r = state[2] / state[4]
            a = r * np.sin(state[4] * t + state[3]) - r * np.sin(state[3])# + state[0]
            b = -r * np.cos(state[4] * t + state[3]) + r * np.cos(state[3]) + state[1]
            c = state[2]
            d = np.pi / 2#state[4] * t + state[3]
            e = state[4]# + np.sin(debug_index / 10) * 0.01
            # e = states_i + np.sin(debug_index / 10) * 10 * np.pi / 180
        #adjust x from parabola param(down-right coordinate)
        # Evaluate the previous lane parabola at the predicted longitudinal
        # position (converted to pixels) to re-derive the lateral offset.
        pixl_b = b * (IMAGE_HEI / 40)
        parabola_y1 = self.pre_parabola_param[0] * (-pixl_b)**2 + self.pre_parabola_param[1] * (-pixl_b) + self.pre_parabola_param[2]
        dalta_y = parabola_y1 - self.pre_parabola_param[2]
        # a = dalta_y * (3.5 / lane_wid) + a
        pixl_y = parabola_y1 * (3.5 / lane_wid)
        a = pixl_y - a
        b = b - state[1]
        # Shift the lateral estimate by one 3.5 m lane if it drifted more than
        # half a lane from the last observation -- presumably a lane-change
        # guard; confirm.
        if self.obs[0] - a > 3.5/2:
            a += 3.5
        elif self.obs[0] - a < -3.5/2:
            a -= 3.5
        self.transition_state = np.array([a, b, c, d, e]) + noise
        # print ("self.transition_state:" + str(self.transition_state))
        return self.transition_state
    def observation_function(self, state, noise):
        """Identity observation model: the full state is observed directly.

        NOTE(review): `C` is unused; the commented variants suggest an
        abandoned experiment with a linear observation matrix.
        """
        # C = np.array([[-1, 0.5], [0.2, 0.1]])
        # C = np.array([[1, 0], [0, 1]])
        C = np.eye(5)
        # return np.dot(C, state) + noise
        return state + noise
    def update(self, obs):
        """Run one UKF predict/update cycle against observation `obs` and
        return (and cache) the updated [mean, covariance] pair."""
        self.estimate_state = self.kf.filter_update(self.estimate_state[0], self.estimate_state[1], obs, self.transition_function,
                self.transition_covariance, self.observation_function, self.observation_covariance)
        print ("estimate X:" + str(self.estimate_state[0]))
        return self.estimate_state
    def update_step(self, x, v, w, t, parabola_param):
        """Feed a new measurement (x, v, w) observed over time step `t` with
        the current lane-parabola coefficients; returns the filtered x."""
        print ("update_step1:({},{},{},{},{})".format(x,v,w,t,str(parabola_param)))
        self.T = t
        self.parabola_param = parabola_param
        # obs = [x, y, v, theta, w]
        # we didn't have obs_y so use predict obs_y(we didn't use y)
        if w == 0 or self.transition_state[4] == 0:
            y = self.transition_state[2] * np.sin(self.transition_state[3]) + self.transition_state[1]
        else:
            r = self.transition_state[2] / self.transition_state[4]
            y = -r * np.cos(self.transition_state[4] * t + self.transition_state[3]) + r * np.cos(self.transition_state[3]) + self.transition_state[1]
        self.obs = [x, y, v, np.pi / 2, w]
        self.timestamp = time.time()
        print ("update_step2:({})".format(str(self.obs)))
        self.update(self.obs)
        # Keep this frame's parabola for the next prediction step.
        self.pre_parabola_param = parabola_param
        print ("update_step3:({})".format(str(self.estimate_state[0])))
        return self.estimate_state[0][0]
    def get_estimate(self):
        """Latest filtered (posterior-mean) lateral position x."""
        return self.estimate_state[0][0]
    def get_predict(self):
        """Lateral position x from the last prediction (prior mean)."""
        return self.transition_state[0]
def test():
    """Smoke-test the Fusion UKF on a simulated constant-turn-rate target.

    Generates 1000 ground-truth CTRV states, corrupts them with Gaussian
    noise to form observations, filters them with Fusion, and plots the
    true / filtered / observed (x, y) tracks.
    """
    # BUG FIX: Fusion.__init__ requires (x, v, w); the original `Fusion()`
    # call raised TypeError.  Use the values from the commented-out default
    # initial state [0, 0, 20, 0, pi/180] in Fusion.__init__.
    fusion = Fusion(0, 20, np.pi / 180)
    states, observations = [], []
    states_init = fusion.initial_state_mean
    filtered_state_estimates = []
    dt = fusion.T  # time step is invariant throughout the test; hoisted
    for _ in range(1000):
        if states_init[4] == 0:
            # Zero turn rate: straight-line motion.
            a = states_init[2] * np.cos(states_init[3]) + states_init[0]
            b = states_init[2] * np.sin(states_init[3]) + states_init[1]
            c = states_init[2]
            d = states_init[4] * dt + states_init[3]
            e = states_init[4]
        else:
            # CTRV update: motion on a circle of radius v / w.
            r = states_init[2] / states_init[4]
            a = r * np.sin(states_init[4] * dt + states_init[3]) - r * np.sin(states_init[3]) + states_init[0]
            b = -r * np.cos(states_init[4] * dt + states_init[3]) + r * np.cos(states_init[3]) + states_init[1]
            c = states_init[2]
            d = states_init[4] * dt + states_init[3]
            e = states_init[4]
        # Wrap the heading into [0, 2*pi).
        if d > 2 * np.pi:
            d = d - 2 * np.pi
        elif d < 0:
            d = d + 2 * np.pi
        # Ground truth corrupted with small correlated position noise.
        true_mu = np.array([[a, b]])
        true_sigma = np.array([[2, 0], [0, 4]])
        true_R = cholesky(true_sigma)
        true_p = np.dot(np.random.randn(1, 2), true_R) + true_mu
        true_v = 0.1 * np.random.randn(1) + c
        true_theta = 0.1 * np.random.randn(1) + d
        true_w = 0.1 * np.random.randn(1) + e
        states_init = [a, b, c, d, e]
        states.append([true_p[0][0], true_p[0][1], true_v[0], true_theta[0], true_w[0]])
        # Noisier measurement fed to the filter.
        mu = np.array([[a, b]])
        Sigma = np.array([[10, 0], [0, 50]])
        R = cholesky(Sigma)
        p = np.dot(np.random.randn(1, 2), R) + mu
        v = 0.1 * np.random.randn(1) + c
        theta = 0.1 * np.random.randn(1) + d
        w = 0.1 * np.random.randn(1) + e
        obs = [p[0][0], p[0][1], v[0], theta[0], w[0]]
        observations.append(obs)
        # update() returns [mean, covariance]; keep the filtered mean.
        filtered_state_estimates.append(fusion.update(obs)[0])
    states = np.array(states)
    observations = np.array(observations)
    filtered_state_estimates = np.array(filtered_state_estimates)
    # Draw true vs. filtered trajectories plus raw observation points.
    pl.figure()
    lines_true = pl.plot(states[:, 0:2], color='b')
    lines_filt = pl.plot(filtered_state_estimates[:, 0:2], color='r', ls='-')
    point_obs = pl.plot(observations[:, 0:2], 'go')
    pl.legend((lines_true[0], lines_filt[0], point_obs[0]),
              ('true', 'filt', 'point_obs'),
              loc='lower left'
    )
    pl.show()
# test()
# debug_index = 0
# for i in range(50):
# debug_index += (np.pi / 6)
# print ("np.sin(debug_index / 10):" + str(np.sin(debug_index / 10)))
| [
"pylab.show",
"numpy.random.randn",
"pylab.plot",
"numpy.random.RandomState",
"time.time",
"numpy.sin",
"numpy.array",
"pylab.figure",
"pykalman.UnscentedKalmanFilter",
"numpy.cos",
"numpy.eye",
"pylab.legend",
"numpy.linalg.cholesky"
] | [((8539, 8555), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (8547, 8555), True, 'import numpy as np\n'), ((8575, 8597), 'numpy.array', 'np.array', (['observations'], {}), '(observations)\n', (8583, 8597), True, 'import numpy as np\n'), ((8629, 8663), 'numpy.array', 'np.array', (['filtered_state_estimates'], {}), '(filtered_state_estimates)\n', (8637, 8663), True, 'import numpy as np\n'), ((8867, 8878), 'pylab.figure', 'pl.figure', ([], {}), '()\n', (8876, 8878), True, 'import pylab as pl\n'), ((8896, 8930), 'pylab.plot', 'pl.plot', (['states[:, 0:2]'], {'color': '"""b"""'}), "(states[:, 0:2], color='b')\n", (8903, 8930), True, 'import pylab as pl\n'), ((8948, 9008), 'pylab.plot', 'pl.plot', (['filtered_state_estimates[:, 0:2]'], {'color': '"""r"""', 'ls': '"""-"""'}), "(filtered_state_estimates[:, 0:2], color='r', ls='-')\n", (8955, 9008), True, 'import pylab as pl\n'), ((9025, 9060), 'pylab.plot', 'pl.plot', (['observations[:, 0:2]', '"""go"""'], {}), "(observations[:, 0:2], 'go')\n", (9032, 9060), True, 'import pylab as pl\n'), ((9065, 9173), 'pylab.legend', 'pl.legend', (['(lines_true[0], lines_filt[0], point_obs[0])', "('true', 'filt', 'point_obs')"], {'loc': '"""lower left"""'}), "((lines_true[0], lines_filt[0], point_obs[0]), ('true', 'filt',\n 'point_obs'), loc='lower left')\n", (9074, 9173), True, 'import pylab as pl\n'), ((9217, 9226), 'pylab.show', 'pl.show', ([], {}), '()\n', (9224, 9226), True, 'import pylab as pl\n'), ((1228, 1252), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (1249, 1252), True, 'import numpy as np\n'), ((1290, 1402), 'numpy.array', 'np.array', (['[[0.5, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0.1, 0, 0], [0, 0, 0, 0.001, 0],\n [0, 0, 0, 0, 0.001]]'], {}), '([[0.5, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0.1, 0, 0], [0, 0, 0, \n 0.001, 0], [0, 0, 0, 0, 0.001]])\n', (1298, 1402), True, 'import numpy as np\n'), ((1683, 1795), 'numpy.array', 'np.array', (['[[0.5, 0, 0, 0, 0], [0, 1, 0, 0, 0], 
[0, 0, 0.5, 0, 0], [0, 0, 0, 0.001, 0],\n [0, 0, 0, 0, 0.001]]'], {}), '([[0.5, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0.5, 0, 0], [0, 0, 0, \n 0.001, 0], [0, 0, 0, 0, 0.001]])\n', (1691, 1795), True, 'import numpy as np\n'), ((2341, 2456), 'numpy.array', 'np.array', (['[[0.5, 0, 0, 0, 0], [0, 0.02, 0, 0, 0], [0, 0, 0.1, 0, 0], [0, 0, 0, 0.001,\n 0], [0, 0, 0, 0, 0.001]]'], {}), '([[0.5, 0, 0, 0, 0], [0, 0.02, 0, 0, 0], [0, 0, 0.1, 0, 0], [0, 0, \n 0, 0.001, 0], [0, 0, 0, 0, 0.001]])\n', (2349, 2456), True, 'import numpy as np\n'), ((2835, 3068), 'pykalman.UnscentedKalmanFilter', 'UnscentedKalmanFilter', (['self.transition_function', 'self.observation_function', 'self.transition_covariance', 'self.observation_covariance', 'self.initial_state_mean', 'self.initial_state_covariance'], {'random_state': 'self.random_state'}), '(self.transition_function, self.observation_function,\n self.transition_covariance, self.observation_covariance, self.\n initial_state_mean, self.initial_state_covariance, random_state=self.\n random_state)\n', (2856, 3068), False, 'from pykalman import UnscentedKalmanFilter\n'), ((3138, 3149), 'time.time', 'time.time', ([], {}), '()\n', (3147, 3149), False, 'import time\n'), ((4764, 4773), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (4770, 4773), True, 'import numpy as np\n'), ((5985, 5996), 'time.time', 'time.time', ([], {}), '()\n', (5994, 5996), False, 'import time\n'), ((7585, 7603), 'numpy.array', 'np.array', (['[[a, b]]'], {}), '([[a, b]])\n', (7593, 7603), True, 'import numpy as np\n'), ((7625, 7651), 'numpy.array', 'np.array', (['[[2, 0], [0, 4]]'], {}), '([[2, 0], [0, 4]])\n', (7633, 7651), True, 'import numpy as np\n'), ((7669, 7689), 'numpy.linalg.cholesky', 'cholesky', (['true_sigma'], {}), '(true_sigma)\n', (7677, 7689), False, 'from numpy.linalg import cholesky\n'), ((8105, 8123), 'numpy.array', 'np.array', (['[[a, b]]'], {}), '([[a, b]])\n', (8113, 8123), True, 'import numpy as np\n'), ((8140, 8168), 'numpy.array', 'np.array', 
(['[[10, 0], [0, 50]]'], {}), '([[10, 0], [0, 50]])\n', (8148, 8168), True, 'import numpy as np\n'), ((8181, 8196), 'numpy.linalg.cholesky', 'cholesky', (['Sigma'], {}), '(Sigma)\n', (8189, 8196), False, 'from numpy.linalg import cholesky\n'), ((4469, 4494), 'numpy.array', 'np.array', (['[a, b, c, d, e]'], {}), '([a, b, c, d, e])\n', (4477, 4494), True, 'import numpy as np\n'), ((7714, 7735), 'numpy.random.randn', 'np.random.randn', (['(1)', '(2)'], {}), '(1, 2)\n', (7729, 7735), True, 'import numpy as np\n'), ((7778, 7796), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (7793, 7796), True, 'import numpy as np\n'), ((7828, 7846), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (7843, 7846), True, 'import numpy as np\n'), ((7874, 7892), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (7889, 7892), True, 'import numpy as np\n'), ((8216, 8237), 'numpy.random.randn', 'np.random.randn', (['(1)', '(2)'], {}), '(1, 2)\n', (8231, 8237), True, 'import numpy as np\n'), ((8265, 8283), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (8280, 8283), True, 'import numpy as np\n'), ((8310, 8328), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (8325, 8328), True, 'import numpy as np\n'), ((8351, 8369), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (8366, 8369), True, 'import numpy as np\n'), ((3272, 3288), 'numpy.cos', 'np.cos', (['state[3]'], {}), '(state[3])\n', (3278, 3288), True, 'import numpy as np\n'), ((3327, 3343), 'numpy.sin', 'np.sin', (['state[3]'], {}), '(state[3])\n', (3333, 3343), True, 'import numpy as np\n'), ((3525, 3556), 'numpy.sin', 'np.sin', (['(state[4] * t + state[3])'], {}), '(state[4] * t + state[3])\n', (3531, 3556), True, 'import numpy as np\n'), ((3563, 3579), 'numpy.sin', 'np.sin', (['state[3]'], {}), '(state[3])\n', (3569, 3579), True, 'import numpy as np\n'), ((5624, 5656), 'numpy.sin', 'np.sin', (['self.transition_state[3]'], {}), 
'(self.transition_state[3])\n', (5630, 5656), True, 'import numpy as np\n'), ((6746, 6768), 'numpy.cos', 'np.cos', (['states_init[3]'], {}), '(states_init[3])\n', (6752, 6768), True, 'import numpy as np\n'), ((6819, 6841), 'numpy.sin', 'np.sin', (['states_init[3]'], {}), '(states_init[3])\n', (6825, 6841), True, 'import numpy as np\n'), ((3613, 3644), 'numpy.cos', 'np.cos', (['(state[4] * t + state[3])'], {}), '(state[4] * t + state[3])\n', (3619, 3644), True, 'import numpy as np\n'), ((3651, 3667), 'numpy.cos', 'np.cos', (['state[3]'], {}), '(state[3])\n', (3657, 3667), True, 'import numpy as np\n'), ((5787, 5850), 'numpy.cos', 'np.cos', (['(self.transition_state[4] * t + self.transition_state[3])'], {}), '(self.transition_state[4] * t + self.transition_state[3])\n', (5793, 5850), True, 'import numpy as np\n'), ((5857, 5889), 'numpy.cos', 'np.cos', (['self.transition_state[3]'], {}), '(self.transition_state[3])\n', (5863, 5889), True, 'import numpy as np\n'), ((7110, 7153), 'numpy.sin', 'np.sin', (['(states_init[4] * i + states_init[3])'], {}), '(states_init[4] * i + states_init[3])\n', (7116, 7153), True, 'import numpy as np\n'), ((7160, 7182), 'numpy.sin', 'np.sin', (['states_init[3]'], {}), '(states_init[3])\n', (7166, 7182), True, 'import numpy as np\n'), ((7221, 7264), 'numpy.cos', 'np.cos', (['(states_init[4] * i + states_init[3])'], {}), '(states_init[4] * i + states_init[3])\n', (7227, 7264), True, 'import numpy as np\n'), ((7271, 7293), 'numpy.cos', 'np.cos', (['states_init[3]'], {}), '(states_init[3])\n', (7277, 7293), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
from Base.Recommender import Recommender
from Base.Recommender_utils import check_matrix
class TopPop(Recommender):
    """Top Popular recommender.

    Ranks items by their interaction count in the training URM; every
    user receives the same, purely popularity-based scores.
    """

    RECOMMENDER_NAME = "TopPopRecommender"

    def __init__(self, URM_train):
        super(TopPop, self).__init__()
        # convert to csc matrix for faster column-wise sum
        self.URM_train = check_matrix(URM_train, 'csc', dtype=np.float32)
        self.URM_train.eliminate_zeros()
        self.compute_item_score = self.compute_score_top_pop

    def fit(self):
        """Compute per-item popularity from the training interactions."""
        # np.ediff1d on indptr counts the stored entries per column, which is
        # robust to interaction values other than 0/1 (unlike a column sum).
        self.item_pop = np.ediff1d(self.URM_train.indptr)
        self.URM_train = check_matrix(self.URM_train, 'csr', dtype=np.float32)
        # necessary to convert it into a numpy.array of size (n_items,)
        self.item_pop = np.asarray(self.item_pop).squeeze()

    def compute_score_top_pop(self, user_id_array):
        """Return the popularity scores replicated for each requested user.

        Parameters
        ----------
        user_id_array : array-like
            Users to score; only its length matters since scores are global.

        Returns
        -------
        numpy.ndarray of shape (len(user_id_array), n_items)
        """
        # FIX: np.float was removed in NumPy 1.24; use an explicit float64.
        # np.array already copies its input, so the extra .copy() was redundant.
        scores_batch = np.array(self.item_pop, dtype=np.float64).reshape((1, -1))
        scores_batch = np.repeat(scores_batch, len(user_id_array), axis=0)
        return scores_batch

    def __str__(self):
        return "TopPop"
class GlobalEffects(Recommender):
    """Non-personalized recommender based on global mean and item/user biases.

    The item bias (computed on global-mean-centred interactions and
    shrunk by ``lambda_item``) is used as the score for every user.
    """

    RECOMMENDER_NAME = "GlobalEffectsRecommender"

    def __init__(self, URM_train):
        super(GlobalEffects, self).__init__()
        # CSC layout gives fast column-wise (per-item) aggregation during fit.
        self.URM_train = check_matrix(URM_train, 'csc', dtype=np.float32)
        self.compute_item_score = self.compute_score_global_effects

    def fit(self, lambda_user=10, lambda_item=25):
        """Estimate the global mean and the shrunk item/user biases.

        Parameters
        ----------
        lambda_user : float, optional
            Shrinkage term for the user bias.
        lambda_item : float, optional
            Shrinkage term for the item bias.
        """
        self.lambda_user = lambda_user
        self.lambda_item = lambda_item

        # 1) global average over the stored interaction values
        self.mu = self.URM_train.data.sum(dtype=np.float32) / self.URM_train.data.shape[0]

        # 2) item average bias
        # Number of non-zero elements per column. Equivalent to
        # X.indptr[1:] - X.indptr[:-1] and **much faster** than
        # (X != 0).sum(axis=0).
        col_nnz = np.diff(self.URM_train.indptr)

        URM_train_unbiased = self.URM_train.copy()
        URM_train_unbiased.data -= self.mu
        self.item_bias = URM_train_unbiased.sum(axis=0) / (col_nnz + self.lambda_item)
        # converts the 2-d matrix to a 1-d array without any copy
        self.item_bias = np.asarray(self.item_bias).ravel()

        # 3) user average bias
        # NOTE: the user bias is *useless* for the sake of ranking items.
        # It is kept here for educational purposes only.
        # Subtract the item bias from each stored entry by repeating each
        # element of the item-bias vector col_nnz times.
        URM_train_unbiased.data -= np.repeat(self.item_bias, col_nnz)

        # CSR layout for efficient row-wise computation
        URM_train_unbiased_csr = URM_train_unbiased.tocsr()
        row_nnz = np.diff(URM_train_unbiased_csr.indptr)
        self.user_bias = URM_train_unbiased_csr.sum(axis=1).ravel() / (row_nnz + self.lambda_user)

        self.URM_train = check_matrix(self.URM_train, 'csr', dtype=np.float32)

    def compute_score_global_effects(self, user_id_array):
        """Return the item-bias scores replicated for each requested user."""
        # FIX: np.float was removed in NumPy 1.24; use an explicit float64.
        # np.array already copies its input, so the extra .copy() was redundant.
        scores_batch = np.array(self.item_bias, dtype=np.float64).reshape((1, -1))
        scores_batch = np.repeat(scores_batch, len(user_id_array), axis=0)
        return scores_batch

    def __str__(self):
        return 'GlobalEffects'
class Random(Recommender):
    """Recommender that assigns uniformly random scores to every item."""

    RECOMMENDER_NAME = "RandomRecommender"

    def __init__(self, URM_train):
        super(Random, self).__init__()
        # Store the URM in CSR format, consistent with the other baselines.
        self.URM_train = check_matrix(URM_train, 'csr', dtype=np.float32)
        self.compute_item_score = self.compute_score_random

    def fit(self):
        # Only the item count is needed to draw random scores later.
        self.n_items = self.URM_train.shape[1]

    def compute_score_random(self, user_id_array):
        """Return one row of uniform random scores per requested user."""
        n_users = len(user_id_array)
        return np.random.rand(n_users, self.n_items)

    def __str__(self):
        return "Random"
"Base.Recommender_utils.check_matrix",
"numpy.asarray",
"numpy.diff",
"numpy.ediff1d",
"numpy.repeat"
] | [((448, 496), 'Base.Recommender_utils.check_matrix', 'check_matrix', (['URM_train', '"""csc"""'], {'dtype': 'np.float32'}), "(URM_train, 'csc', dtype=np.float32)\n", (460, 496), False, 'from Base.Recommender_utils import check_matrix\n'), ((810, 843), 'numpy.ediff1d', 'np.ediff1d', (['self.URM_train.indptr'], {}), '(self.URM_train.indptr)\n', (820, 843), True, 'import numpy as np\n'), ((871, 924), 'Base.Recommender_utils.check_matrix', 'check_matrix', (['self.URM_train', '"""csr"""'], {'dtype': 'np.float32'}), "(self.URM_train, 'csr', dtype=np.float32)\n", (883, 924), False, 'from Base.Recommender_utils import check_matrix\n'), ((1586, 1634), 'Base.Recommender_utils.check_matrix', 'check_matrix', (['URM_train', '"""csc"""'], {'dtype': 'np.float32'}), "(URM_train, 'csc', dtype=np.float32)\n", (1598, 1634), False, 'from Base.Recommender_utils import check_matrix\n'), ((2134, 2164), 'numpy.diff', 'np.diff', (['self.URM_train.indptr'], {}), '(self.URM_train.indptr)\n', (2141, 2164), True, 'import numpy as np\n'), ((3010, 3044), 'numpy.repeat', 'np.repeat', (['self.item_bias', 'col_nnz'], {}), '(self.item_bias, col_nnz)\n', (3019, 3044), True, 'import numpy as np\n'), ((3203, 3241), 'numpy.diff', 'np.diff', (['URM_train_unbiased_csr.indptr'], {}), '(URM_train_unbiased_csr.indptr)\n', (3210, 3241), True, 'import numpy as np\n'), ((3636, 3689), 'Base.Recommender_utils.check_matrix', 'check_matrix', (['self.URM_train', '"""csr"""'], {'dtype': 'np.float32'}), "(self.URM_train, 'csr', dtype=np.float32)\n", (3648, 3689), False, 'from Base.Recommender_utils import check_matrix\n'), ((4266, 4314), 'Base.Recommender_utils.check_matrix', 'check_matrix', (['URM_train', '"""csr"""'], {'dtype': 'np.float32'}), "(URM_train, 'csr', dtype=np.float32)\n", (4278, 4314), False, 'from Base.Recommender_utils import check_matrix\n'), ((950, 975), 'numpy.asarray', 'np.asarray', (['self.item_pop'], {}), '(self.item_pop)\n', (960, 975), True, 'import numpy as np\n'), ((2535, 2561), 
'numpy.asarray', 'np.asarray', (['self.item_bias'], {}), '(self.item_bias)\n', (2545, 2561), True, 'import numpy as np\n')] |
import logging
from typing import Union
from jigsawpy import jigsaw_msh_t, jigsaw_jig_t
from jigsawpy import libsaw
import numpy as np
from pyproj import CRS
from ocsmesh import utils
from ocsmesh.mesh import Mesh
from ocsmesh.hfun import Hfun
from ocsmesh.hfun.base import BaseHfun
from ocsmesh.geom import Geom
from ocsmesh.geom.base import BaseGeom
_logger = logging.getLogger(__name__)
class GeomDescriptor:
    """Data descriptor that only accepts ``BaseGeom`` instances."""

    def __set__(self, obj, val):
        if isinstance(val, BaseGeom):
            obj.__dict__['geom'] = val
        else:
            raise TypeError(f'Argument geom must be of type {Geom}, '
                            f'not type {type(val)}.')

    def __get__(self, obj, val):
        return obj.__dict__['geom']
class HfunDescriptor:
    """Data descriptor that only accepts ``BaseHfun`` instances."""

    def __set__(self, obj, val):
        if isinstance(val, BaseHfun):
            obj.__dict__['hfun'] = val
        else:
            raise TypeError(f'Argument hfun must be of type {Hfun}, '
                            f'not type {type(val)}.')

    def __get__(self, obj, val):
        return obj.__dict__['hfun']
class OptsDescriptor:
    """Descriptor that lazily builds and caches default jigsaw options."""

    def __get__(self, obj, val):
        # Build the options object on first access and memoize it on the
        # owning instance; subsequent reads return the cached object.
        if obj.__dict__.get('opts') is None:
            defaults = jigsaw_jig_t()
            defaults.mesh_dims = +2
            defaults.optm_tria = True
            defaults.hfun_scal = 'absolute'
            obj.__dict__['opts'] = defaults
        return obj.__dict__['opts']
class JigsawDriver:
    """Driver that runs the jigsaw mesh generator on a geometry and a
    mesh size function (hfun) and returns the resulting Mesh.

    NOTE: assigning to the descriptor-backed attributes below stores the
    validated values under ``obj.__dict__['geom'|'hfun'|'opts']``, which
    is exactly what the plain ``self.geom`` / ``self.hfun`` /
    ``self.opts`` reads in :meth:`run` pick up.
    """

    _geom = GeomDescriptor()
    _hfun = HfunDescriptor()
    _opts = OptsDescriptor()

    def __init__(
            self,
            geom: Geom,
            hfun: Hfun,
            initial_mesh: bool = False,
            crs: Union[str, CRS] = None,
            verbosity: int = 0,
    ):
        """
        geom can be SizeFunction or PlanarStraightLineGraph instance.

        Parameters
        ----------
        geom : Geom
            Meshing domain geometry (validated by GeomDescriptor).
        hfun : Hfun
            Mesh size function (validated by HfunDescriptor).
        initial_mesh : bool, optional
            If True, the hfun mesh is also passed to jigsaw as the
            initial mesh (``init`` argument of ``libsaw.jigsaw``).
        crs : str or CRS, optional
            Target coordinate reference system for the output mesh.
        verbosity : int, optional
            Verbosity level stored on the jigsaw options.
        """
        self._geom = geom
        self._hfun = hfun
        self._init = initial_mesh
        self._crs = CRS.from_user_input(crs) if crs is not None else crs
        self._opts.verbosity = verbosity

    def run(self, sieve=None, quality_metric=1.05):
        """Generate the mesh by calling ``libsaw.jigsaw``.

        Parameters
        ----------
        sieve : optional
            Forwarded to ``utils.finalize_mesh``.
        quality_metric : float, optional
            Quality bound passed to jigsaw as ``mesh_rad2``.

        Returns
        -------
        Mesh
            The finalized output mesh.

        Raises
        ------
        Exception
            If jigsaw returns a mesh with no triangles.
        """

        hfun_msh_t = self.hfun.msh_t()

        output_mesh = jigsaw_msh_t()
        output_mesh.mshID = 'euclidean-mesh'
        output_mesh.ndims = 2

        # Bound element sizes by the size-function extrema.
        self.opts.hfun_hmin = np.min(hfun_msh_t.value)
        self.opts.hfun_hmax = np.max(hfun_msh_t.value)
        self.opts.mesh_rad2 = float(quality_metric)

        geom_msh_t = self.geom.msh_t()

        # When the center of geom and hfun are NOT the same, utm
        # zones would be different for resulting msh_t. Reproject the
        # hfun mesh onto the geometry CRS before meshing.
        if geom_msh_t.crs != hfun_msh_t.crs:
            utils.reproject(hfun_msh_t, geom_msh_t.crs)
        output_mesh.crs = hfun_msh_t.crs

        _logger.info('Calling libsaw.jigsaw() ...')
        libsaw.jigsaw(
            self.opts,
            geom_msh_t,
            output_mesh,
            init=hfun_msh_t if self._init is True else None,
            hfun=hfun_msh_t
        )

        # post process: an empty triangle index means jigsaw failed.
        if output_mesh.tria3['index'].shape[0] == 0:
            _err = 'ERROR: Jigsaw returned empty mesh.'
            _logger.error(_err)
            raise Exception(_err)

        # Reproject to the user-requested CRS only if one was given.
        if self._crs is not None:
            utils.reproject(output_mesh, self._crs)

        _logger.info('Finalizing mesh...')
        # Don't need to use ad-hoc fix since Jigsaw tiny element
        # issue is resolve. In case needed add a flag for remesh
        # since it's computationally expensive
        # if self.opts.hfun_hmin > 0:
        #     output_mesh = utils.remesh_small_elements(
        #         self.opts, geom_msh_t, output_mesh, hfun_msh_t)
        utils.finalize_mesh(output_mesh, sieve)
        _logger.info('done!')

        return Mesh(output_mesh)
| [
"pyproj.CRS.from_user_input",
"ocsmesh.utils.reproject",
"ocsmesh.utils.finalize_mesh",
"ocsmesh.mesh.Mesh",
"numpy.min",
"numpy.max",
"jigsawpy.jigsaw_msh_t",
"jigsawpy.libsaw.jigsaw",
"jigsawpy.jigsaw_jig_t",
"logging.getLogger"
] | [((366, 393), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (383, 393), False, 'import logging\n'), ((2100, 2114), 'jigsawpy.jigsaw_msh_t', 'jigsaw_msh_t', ([], {}), '()\n', (2112, 2114), False, 'from jigsawpy import jigsaw_msh_t, jigsaw_jig_t\n'), ((2221, 2245), 'numpy.min', 'np.min', (['hfun_msh_t.value'], {}), '(hfun_msh_t.value)\n', (2227, 2245), True, 'import numpy as np\n'), ((2276, 2300), 'numpy.max', 'np.max', (['hfun_msh_t.value'], {}), '(hfun_msh_t.value)\n', (2282, 2300), True, 'import numpy as np\n'), ((2718, 2838), 'jigsawpy.libsaw.jigsaw', 'libsaw.jigsaw', (['self.opts', 'geom_msh_t', 'output_mesh'], {'init': '(hfun_msh_t if self._init is True else None)', 'hfun': 'hfun_msh_t'}), '(self.opts, geom_msh_t, output_mesh, init=hfun_msh_t if self.\n _init is True else None, hfun=hfun_msh_t)\n', (2731, 2838), False, 'from jigsawpy import libsaw\n'), ((3577, 3616), 'ocsmesh.utils.finalize_mesh', 'utils.finalize_mesh', (['output_mesh', 'sieve'], {}), '(output_mesh, sieve)\n', (3596, 3616), False, 'from ocsmesh import utils\n'), ((3663, 3680), 'ocsmesh.mesh.Mesh', 'Mesh', (['output_mesh'], {}), '(output_mesh)\n', (3667, 3680), False, 'from ocsmesh.mesh import Mesh\n'), ((1194, 1208), 'jigsawpy.jigsaw_jig_t', 'jigsaw_jig_t', ([], {}), '()\n', (1206, 1208), False, 'from jigsawpy import jigsaw_msh_t, jigsaw_jig_t\n'), ((1890, 1914), 'pyproj.CRS.from_user_input', 'CRS.from_user_input', (['crs'], {}), '(crs)\n', (1909, 1914), False, 'from pyproj import CRS\n'), ((2572, 2615), 'ocsmesh.utils.reproject', 'utils.reproject', (['hfun_msh_t', 'geom_msh_t.crs'], {}), '(hfun_msh_t, geom_msh_t.crs)\n', (2587, 2615), False, 'from ocsmesh import utils\n'), ((3150, 3189), 'ocsmesh.utils.reproject', 'utils.reproject', (['output_mesh', 'self._crs'], {}), '(output_mesh, self._crs)\n', (3165, 3189), False, 'from ocsmesh import utils\n')] |
# coding=utf-8
# Author: <NAME> <<EMAIL>>
import numpy as np
from perturbation_classifiers.base import BasePerC
from perturbation_classifiers.estimation import estimate_mean_vector_per_class, estimate_covariance_matrix_per_class, estimate_delta_mean_vector_per_class, estimate_delta_covariance_matrix_per_class
from perturbation_classifiers.perturbation import perturbation_mean, perturbation_covariance, perturbation_combination
class PerC(BasePerC):
    """Perturbation-based Classifier.

    Depending on ``mode``, perturbations are measured on the class mean
    vectors ("mean"), on the covariance matrices ("covariance"), or on a
    combination of both ("auto").
    """

    def __init__(self, mode="auto"):
        super(PerC, self).__init__(mode=mode)

    def fit(self, X, y):
        """Fit the perturbation classifiers according to the given training data.

        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            The input data.

        y : array of shape (n_samples, )
            class labels of each example in X.

        Returns
        -------
        self
        """
        super(PerC, self).fit(X, y)

        # Store the classes and their counts seen during fit.
        self.classes_, self.count_classes_ = np.unique(y, return_counts=True)

        # Check that the mode parameter has a valid value.
        self._validate_parameters()

        # Class-conditional mean vectors are required by every mode.
        self.means_ = estimate_mean_vector_per_class(
            X, y, self.classes_, self.count_classes_)

        # Covariance matrices are needed by the "covariance" and "auto" modes.
        if self.mode != "mean":
            self.covariances_ = estimate_covariance_matrix_per_class(
                X, y, self.classes_, self.count_classes_, self.means_)

        # "auto" additionally needs the pseudo-inverse of each covariance.
        if self.mode == "auto":
            self.inverse_covariances_ = np.linalg.pinv(self.covariances_)

        return self

    def perturbation(self, X):
        """Return the perturbation for sample in X.

        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            The input data.

        Returns
        -------
        perturbations : array of shape (n_samples, n_classes)
            Perturbation estimates for each sample in X.
        """
        super(PerC, self).perturbation(X)

        if self.mode == "mean":
            # Perturbation based on the mean vector only.
            deltas_mean = estimate_delta_mean_vector_per_class(
                X, self.means_, self.count_classes_)
            return perturbation_mean(deltas_mean)

        if self.mode == "covariance":
            # Perturbation based on the covariance matrix only.
            deltas_cov = estimate_delta_covariance_matrix_per_class(
                X, self.means_, self.covariances_, self.count_classes_)
            return perturbation_covariance(deltas_cov)

        # "auto": combine mean-vector and covariance-matrix perturbations.
        deltas_mean = estimate_delta_mean_vector_per_class(
            X, self.means_, self.count_classes_)
        deltas_cov = estimate_delta_covariance_matrix_per_class(
            X, self.means_, self.covariances_, self.count_classes_)
        return perturbation_combination(
            X, self.means_, self.inverse_covariances_, deltas_mean, deltas_cov)

    def _validate_parameters(self):
        """Verify if the input parameters are correct."""
        if self.mode not in ("auto", "mean", "covariance"):
            raise ValueError(
                'Invalid value for parameter "mode".'
                ' "mode" should be one of these options '
                '"auto", "mean", "covariance"'
            )
"perturbation_classifiers.perturbation.perturbation_covariance",
"perturbation_classifiers.estimation.estimate_mean_vector_per_class",
"perturbation_classifiers.estimation.estimate_covariance_matrix_per_class",
"perturbation_classifiers.perturbation.perturbation_combination",
"perturbation_classifiers.pertu... | [((1118, 1150), 'numpy.unique', 'np.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (1127, 1150), True, 'import numpy as np\n'), ((1315, 1387), 'perturbation_classifiers.estimation.estimate_mean_vector_per_class', 'estimate_mean_vector_per_class', (['X', 'y', 'self.classes_', 'self.count_classes_'], {}), '(X, y, self.classes_, self.count_classes_)\n', (1345, 1387), False, 'from perturbation_classifiers.estimation import estimate_mean_vector_per_class, estimate_covariance_matrix_per_class, estimate_delta_mean_vector_per_class, estimate_delta_covariance_matrix_per_class\n'), ((1505, 1601), 'perturbation_classifiers.estimation.estimate_covariance_matrix_per_class', 'estimate_covariance_matrix_per_class', (['X', 'y', 'self.classes_', 'self.count_classes_', 'self.means_'], {}), '(X, y, self.classes_, self.\n count_classes_, self.means_)\n', (1541, 1601), False, 'from perturbation_classifiers.estimation import estimate_mean_vector_per_class, estimate_covariance_matrix_per_class, estimate_delta_mean_vector_per_class, estimate_delta_covariance_matrix_per_class\n'), ((1737, 1770), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self.covariances_'], {}), '(self.covariances_)\n', (1751, 1770), True, 'import numpy as np\n'), ((2427, 2500), 'perturbation_classifiers.estimation.estimate_delta_mean_vector_per_class', 'estimate_delta_mean_vector_per_class', (['X', 'self.means_', 'self.count_classes_'], {}), '(X, self.means_, self.count_classes_)\n', (2463, 2500), False, 'from perturbation_classifiers.estimation import estimate_mean_vector_per_class, estimate_covariance_matrix_per_class, estimate_delta_mean_vector_per_class, estimate_delta_covariance_matrix_per_class\n'), ((2529, 2566), 'perturbation_classifiers.perturbation.perturbation_mean', 'perturbation_mean', (['delta_mean_vectors'], {}), '(delta_mean_vectors)\n', (2546, 2566), False, 'from perturbation_classifiers.perturbation import perturbation_mean, 
perturbation_covariance, perturbation_combination\n'), ((2715, 2818), 'perturbation_classifiers.estimation.estimate_delta_covariance_matrix_per_class', 'estimate_delta_covariance_matrix_per_class', (['X', 'self.means_', 'self.covariances_', 'self.count_classes_'], {}), '(X, self.means_, self.\n covariances_, self.count_classes_)\n', (2757, 2818), False, 'from perturbation_classifiers.estimation import estimate_mean_vector_per_class, estimate_covariance_matrix_per_class, estimate_delta_mean_vector_per_class, estimate_delta_covariance_matrix_per_class\n'), ((2842, 2891), 'perturbation_classifiers.perturbation.perturbation_covariance', 'perturbation_covariance', (['delta_covariances_matrix'], {}), '(delta_covariances_matrix)\n', (2865, 2891), False, 'from perturbation_classifiers.perturbation import perturbation_mean, perturbation_covariance, perturbation_combination\n'), ((3031, 3104), 'perturbation_classifiers.estimation.estimate_delta_mean_vector_per_class', 'estimate_delta_mean_vector_per_class', (['X', 'self.means_', 'self.count_classes_'], {}), '(X, self.means_, self.count_classes_)\n', (3067, 3104), False, 'from perturbation_classifiers.estimation import estimate_mean_vector_per_class, estimate_covariance_matrix_per_class, estimate_delta_mean_vector_per_class, estimate_delta_covariance_matrix_per_class\n'), ((3144, 3247), 'perturbation_classifiers.estimation.estimate_delta_covariance_matrix_per_class', 'estimate_delta_covariance_matrix_per_class', (['X', 'self.means_', 'self.covariances_', 'self.count_classes_'], {}), '(X, self.means_, self.\n covariances_, self.count_classes_)\n', (3186, 3247), False, 'from perturbation_classifiers.estimation import estimate_mean_vector_per_class, estimate_covariance_matrix_per_class, estimate_delta_mean_vector_per_class, estimate_delta_covariance_matrix_per_class\n'), ((3271, 3388), 'perturbation_classifiers.perturbation.perturbation_combination', 'perturbation_combination', (['X', 'self.means_', 'self.inverse_covariances_', 
'delta_mean_vectors', 'delta_covariances_matrix'], {}), '(X, self.means_, self.inverse_covariances_,\n delta_mean_vectors, delta_covariances_matrix)\n', (3295, 3388), False, 'from perturbation_classifiers.perturbation import perturbation_mean, perturbation_covariance, perturbation_combination\n')] |
"""This module contains the classes for recharge models.
This module contains the different classes that can be used to simulate the
effect of precipitation and evapotranspiration on groundwater levels.
Depending on the mathematical formulation this effect may be interpreted as:
1. seepage to the groundwater
2. precipitation excess,
3. groundwater recharge.
For the implementation of each model we refer to the references listed in
the documentation of each recharge model.
The classes defined here are designed to be used in conjunction with the
stressmodel "RechargeModel", which requires an instance of one of the
classes defined here.
.. codeauthor:: <NAME>, University of Graz
See Also
--------
pastas.stressmodels.RechargeModel
The recharge models listed above are provided to a RechargeModel
Examples
--------
Using the recharge models is as follows:
>>> rch = ps.rch.FlexModel()
>>> sm = ps.RechargeModel(prec, evap, recharge=rch, rfunc=ps.Gamma, name="rch")
>>> ml.add_stressmodel(sm)
After solving a model, the simulated recharge flux can be obtained:
>>> rch_sim = ml.get_stress("rch")
"""
from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack
from pandas import DataFrame
from pastas.decorators import njit
class RechargeBase:
    """Base class for classes that calculate the recharge."""

    def __init__(self):
        # Subclasses that need temperature data set this flag to True.
        self.temp = False
        # Number of model parameters; overridden by each subclass.
        self.nparam = 0

    @staticmethod
    def get_init_parameters(name="recharge"):
        """Method to obtain the initial parameters.

        Parameters
        ----------
        name: str, optional
            String with the name that is used as prefix for the parameters.

        Returns
        -------
        parameters: pandas.DataFrame
            Pandas DataFrame with the parameters.
        """
        columns = ["initial", "pmin", "pmax", "vary", "name"]
        return DataFrame(columns=columns)

    def simulate(self, prec, evap, p, dt=1.0, **kwargs):
        # No-op placeholder; concrete recharge models override this.
        pass
class Linear(RechargeBase):
    """Linear model for precipitation excess according to [asmuth_2002]_.

    Notes
    -----
    The precipitation excess is calculated as:

    .. math::
        R = P - f * E

    References
    ----------
    .. [asmuth_2002] von <NAME>., <NAME>., and <NAME>. (2002) Transfer
       function-noise modeling in continuous time using predefined impulse
       response functions, Water Resources Research, 38, 23–1–23–12.
    """
    _name = "Linear"

    def __init__(self):
        RechargeBase.__init__(self)
        self.nparam = 1

    def get_init_parameters(self, name="recharge"):
        """Return the initial parameter table with the evaporation factor f."""
        parameters = DataFrame(
            columns=["initial", "pmin", "pmax", "vary", "name"])
        parameters.loc[name + "_f"] = (-1.0, -2.0, 0.0, True, name)
        return parameters

    def simulate(self, prec, evap, p, **kwargs):
        """Simulate the precipitation excess flux.

        Parameters
        ----------
        prec, evap: array_like
            array with the precipitation and evapotranspiration values. These
            arrays must be of the same length and at the same time steps.
        p: array_like
            array_like object with the values as floats representing the
            model parameters.

        Returns
        -------
        recharge: array_like
            array with the recharge series.
        """
        # R = P + f * E, where f is negative by default.
        return add(prec, multiply(evap, p))

    def get_water_balance(self, prec, evap, p, **kwargs):
        """Return the water balance fluxes P, Ea and R as a DataFrame."""
        actual_evap = multiply(evap, p)
        recharge = add(prec, actual_evap)
        fluxes = vstack((prec, actual_evap, -recharge)).T
        return DataFrame(data=fluxes, columns=["P", "Ea", "R"])
class FlexModel(RechargeBase):
    """Recharge to the groundwater calculate according to [collenteur_2020]_.

    Notes
    -----
    Note that the preferred unit of the precipitation and evaporation is mm/d.
    The water balance for the unsaturated zone reservoir is written as:

    .. math::
        \\frac{dS}{dt} = P_e - E_a - R

    where the recharge is calculated as:

    .. math::
        R = K_s \\left( \\frac{S}{S_u}\\right) ^\\gamma

    For a detailed description of the recharge model and parameters we refer
    to Collenteur et al. (in review).

    References
    ----------
    .. [collenteur_2020] <NAME>., <NAME>., <NAME>., & Birk,
       S. (in Review) Estimating groundwater recharge from groundwater
       levels using non-linear transfer function noise models and comparison to
       lysimeter data. https://doi.org/10.5194/hess-2020-392
    """
    _name = "FlexModel"

    def __init__(self):
        RechargeBase.__init__(self)
        self.nparam = 6

    def get_init_parameters(self, name="recharge"):
        """Return the initial parameter table for the six FlexModel parameters.

        srmax: root-zone storage capacity; lp: evaporation reduction
        threshold (as fraction of srmax); ks: saturated conductivity;
        gamma: recharge nonlinearity; simax: interception capacity;
        kv: crop factor applied to the evaporation input.
        """
        parameters = DataFrame(
            columns=["initial", "pmin", "pmax", "vary", "name"])
        parameters.loc[name + "_srmax"] = (250.0, 1e-5, 1e3, True, name)
        parameters.loc[name + "_lp"] = (0.25, 1e-5, 1, False, name)
        parameters.loc[name + "_ks"] = (100.0, 1, 1e4, True, name)
        parameters.loc[name + "_gamma"] = (4.0, 1e-5, 50.0, True, name)
        parameters.loc[name + "_simax"] = (2.0, 1e-5, 10.0, False, name)
        parameters.loc[name + "_kv"] = (1.0, 0.5, 2.0, False, name)
        return parameters

    def simulate(self, prec, evap, p, dt=1.0, **kwargs):
        """Simulate the recharge flux.

        Parameters
        ----------
        prec: numpy.array
            Precipitation flux in mm/d. Has to have the same length as evap.
        evap: numpy.array
            Potential evaporation flux in mm/d.
        p: array_like
            array_like object with the values as floats representing the
            model parameters.
        dt: float, optional
            time step for the calculation of the recharge. Only dt=1 is
            possible now.

        Returns
        -------
        r: numpy.array
            Recharge flux calculated by the model.
        """
        # get_recharge returns (r, ea, ei, pe, su, si); only r is needed here.
        r = self.get_recharge(prec, evap, srmax=p[0], lp=p[1], ks=p[2],
                              gamma=p[3], simax=p[4], kv=p[5], dt=dt)[0]
        return r

    @staticmethod
    @njit
    def get_recharge(prec, evap, srmax=250.0, lp=0.25, ks=100.0, gamma=4.0,
                     simax=2.0, kv=1.0, dt=1.0):
        """
        Internal method used for the recharge calculation. If Numba is
        available, this method is significantly faster.

        Explicit-Euler update of an interception store (si) feeding a
        root-zone store (su); returns the fluxes and states as arrays of
        the same length as prec.
        """
        n = prec.size
        evap = evap * kv  # Multiply by crop factor

        # Create empty arrays to store the fluxes and states
        su = zeros(n, dtype=float64)  # Root Zone Storage State
        su[0] = 0.5 * srmax  # Set the initial system state to half-full
        ea = zeros(n, dtype=float64)  # Actual evaporation Flux
        r = zeros(n, dtype=float64)  # Recharge Flux
        si = zeros(n, dtype=float64)  # Interception Storage State
        pe = zeros(n, dtype=float64)  # Effective precipitation Flux
        ei = zeros(n, dtype=float64)  # Interception evaporation Flux
        ep = zeros(n, dtype=float64)  # Updated evaporation Flux
        lp = lp * srmax  # Do this here outside the for-loop for efficiency

        for t in range(n - 1):
            # Interception bucket: overflow above simax becomes effective
            # precipitation; interception evaporation is limited by storage.
            pe[t] = max(prec[t] - simax + si[t], 0.0)
            ei[t] = min(evap[t], si[t])
            ep[t] = evap[t] - ei[t]
            si[t + 1] = si[t] + dt * (prec[t] - pe[t] - ei[t])

            # Make sure the solution is larger then 0.0 and smaller than su
            if su[t] > srmax:
                su[t] = srmax
            elif su[t] < 0.0:
                su[t] = 0.0

            # Calculate actual evapotranspiration (linear reduction below lp)
            if su[t] / lp < 1.0:
                ea[t] = ep[t] * su[t] / lp
            else:
                ea[t] = ep[t]

            # Calculate the recharge flux
            r[t] = ks * (su[t] / srmax) ** gamma

            # Calculate state of the root zone storage
            su[t + 1] = su[t] + dt * (pe[t] - r[t] - ea[t])

        return r, ea, ei, pe, su, si

    def get_water_balance(self, prec, evap, p, dt=1.0, **kwargs):
        """Return all simulated fluxes/states (Si, Ei, Sr, Pe, Ea, R) as a
        DataFrame; evaporative and recharge fluxes are sign-flipped so the
        columns sum as a water balance."""
        r, ea, ei, pe, sr, si = self.get_recharge(prec, evap, srmax=p[0],
                                                  lp=p[1], ks=p[2],
                                                  gamma=p[3], simax=p[4],
                                                  kv=p[5], dt=dt)
        data = DataFrame(data=vstack((si, -ei, sr, pe, -ea, -r)).T,
                         columns=["Si", "Ei", "Sr", "Pe", "Ea", "R"])
        return data
class Berendrecht(RechargeBase):
    """Recharge to the groundwater calculated according to [berendrecht_2006]_.

    Notes
    -----
    Note that the preferred unit of the precipitation and evaporation is
    mm/d. The waterbalance for the unsaturated zone reservoir is written as:

    .. math::
        \\frac{dS_e}{dt} = \\frac{1}{D_e}(f_iP - E_a - R)

    where the recharge is calculated as:

    .. math::
        R(S_e) = K_sS_e^\\lambda(1-(1-S_e^{1/m})^m)^2

    For a detailed description of the recharge model and parameters we refer
    to the original publication.

    References
    ----------
    .. [berendrecht_2006] <NAME>., <NAME>., <NAME>.,
       and <NAME>. (2006) A non-linear state space approach to model
       groundwater fluctuations, Advances in Water Resources, 29, 959–973.
    """
    _name = "Berendrecht"

    def __init__(self):
        RechargeBase.__init__(self)
        self.nparam = 7

    def get_init_parameters(self, name="recharge"):
        """Return the initial parameter table for the seven model parameters.

        fi/fc: precipitation and evaporation factors; sr: evaporation
        reduction parameter; de: effective reservoir depth; l, m: van
        Genuchten-style shape parameters; ks: saturated conductivity.
        """
        parameters = DataFrame(
            columns=["initial", "pmin", "pmax", "vary", "name"])
        parameters.loc[name + "_fi"] = (0.9, 0.7, 1.3, False, name)
        parameters.loc[name + "_fc"] = (1.0, 0.7, 1.3, False, name)
        parameters.loc[name + "_sr"] = (0.25, 1e-5, 1.0, False, name)
        parameters.loc[name + "_de"] = (250.0, 20, 1e3, True, name)
        parameters.loc[name + "_l"] = (2.0, -4, 50, True, name)
        parameters.loc[name + "_m"] = (0.5, 1e-5, 0.5, False, name)
        parameters.loc[name + "_ks"] = (100.0, 1, 1e4, True, name)
        return parameters

    def simulate(self, prec, evap, p, dt=1.0, **kwargs):
        """Simulate the recharge flux.

        Parameters
        ----------
        prec: numpy.array
            Precipitation flux in mm/d. Has to have the same length as evap.
        evap: numpy.array
            Potential evapotranspiration flux in mm/d.
        p: array_like
            array_like object with the values as floats representing the
            model parameters.
        dt: float, optional
            time step for the calculation of the recharge. Only dt=1 is
            possible now.

        Returns
        -------
        r: numpy.array
            Recharge flux calculated by the model.
        """
        r = self.get_recharge(prec, evap, fi=p[0], fc=p[1], sr=p[2], de=p[3],
                              l=p[4], m=p[5], ks=p[6], dt=dt)[0]
        # Replace any NaN produced by the power terms with zero.
        return nan_to_num(r)

    @staticmethod
    @njit
    def get_recharge(prec, evap, fi=1.0, fc=1.0, sr=0.5, de=250.0, l=-2.0,
                     m=0.5, ks=50.0, dt=1.0):
        """
        Internal method used for the recharge calculation. If Numba is
        available, this method is significantly faster.

        Explicit-Euler update of the dimensionless saturation state s of
        a single reservoir; returns (r, s, ea, pe).
        """
        n = prec.size
        # Create an empty arrays to store the fluxes and states
        pe = fi * prec  # Effective precipitation flux
        ep = fc * evap  # Potential evaporation flux
        s = zeros(n, dtype=float64)  # Root zone storage state (dimensionless)
        s[0] = 0.5  # Set the initial system state
        r = zeros(n, dtype=float64)  # Recharge flux
        ea = zeros(n, dtype=float64)  # Actual evaporation flux

        for t in range(n - 1):
            # Make sure the reservoir is not too full or empty; the
            # exponential clamps keep s smooth near 0 and 1.
            if s[t] < 0.05:
                s[t] = 0.05 * exp(20.0 * s[t] - 1.0)
            elif s[t] > 0.95:
                s[t] = 1 - (0.05 * exp(19.0 - 20.0 * s[t]))

            # Calculate the actual evaporation
            ea[t] = (1.0 - exp(-3 * s[t] / sr)) * ep[t]

            # Calculate the recharge flux
            r[t] = ks * s[t] ** l * (1.0 - (1.0 - s[t] ** (1.0 / m)) ** m) ** 2

            # Calculate the next saturation state
            s[t + 1] = s[t] + dt / de * (pe[t] - ea[t] - r[t])
        return r, s, ea, pe

    def get_water_balance(self, prec, evap, p, dt=1.0, **kwargs):
        """Return the fluxes/states (S, Pe, Ea, R) as a DataFrame; the
        state is rescaled to a depth and the outgoing fluxes sign-flipped."""
        r, s, ea, pe = self.get_recharge(prec, evap, fi=p[0], fc=p[1],
                                         sr=p[2], de=p[3], l=p[4], m=p[5],
                                         ks=p[6], dt=dt)
        s = s * p[3]  # Because S is computed dimensionless in this model
        data = DataFrame(data=vstack((s, pe, -ea, -r)).T,
                         columns=["S", "Pe", "Ea", "R"])
        return data
| [
"pandas.DataFrame",
"numpy.multiply",
"numpy.nan_to_num",
"numpy.zeros",
"numpy.exp",
"numpy.vstack"
] | [((1904, 1966), 'pandas.DataFrame', 'DataFrame', ([], {'columns': "['initial', 'pmin', 'pmax', 'vary', 'name']"}), "(columns=['initial', 'pmin', 'pmax', 'vary', 'name'])\n", (1913, 1966), False, 'from pandas import DataFrame\n'), ((2753, 2815), 'pandas.DataFrame', 'DataFrame', ([], {'columns': "['initial', 'pmin', 'pmax', 'vary', 'name']"}), "(columns=['initial', 'pmin', 'pmax', 'vary', 'name'])\n", (2762, 2815), False, 'from pandas import DataFrame\n'), ((3632, 3649), 'numpy.multiply', 'multiply', (['evap', 'p'], {}), '(evap, p)\n', (3640, 3649), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((4926, 4988), 'pandas.DataFrame', 'DataFrame', ([], {'columns': "['initial', 'pmin', 'pmax', 'vary', 'name']"}), "(columns=['initial', 'pmin', 'pmax', 'vary', 'name'])\n", (4935, 4988), False, 'from pandas import DataFrame\n'), ((6797, 6820), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (6802, 6820), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((6936, 6959), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (6941, 6959), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((7000, 7023), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (7005, 7023), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((7055, 7078), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (7060, 7078), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((7123, 7146), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (7128, 7146), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((7193, 7216), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (7198, 7216), False, 'from numpy import add, float64, 
multiply, exp, zeros, nan_to_num, vstack\n'), ((7264, 7287), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (7269, 7287), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((9850, 9912), 'pandas.DataFrame', 'DataFrame', ([], {'columns': "['initial', 'pmin', 'pmax', 'vary', 'name']"}), "(columns=['initial', 'pmin', 'pmax', 'vary', 'name'])\n", (9859, 9912), False, 'from pandas import DataFrame\n'), ((11309, 11322), 'numpy.nan_to_num', 'nan_to_num', (['r'], {}), '(r)\n', (11319, 11322), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((11846, 11869), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (11851, 11869), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((11962, 11985), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (11967, 11985), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((12017, 12040), 'numpy.zeros', 'zeros', (['n'], {'dtype': 'float64'}), '(n, dtype=float64)\n', (12022, 12040), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((3538, 3555), 'numpy.multiply', 'multiply', (['evap', 'p'], {}), '(evap, p)\n', (3546, 3555), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((3673, 3690), 'numpy.multiply', 'multiply', (['evap', 'p'], {}), '(evap, p)\n', (3681, 3690), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((3723, 3745), 'numpy.vstack', 'vstack', (['(prec, ea, -r)'], {}), '((prec, ea, -r))\n', (3729, 3745), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((8681, 8715), 'numpy.vstack', 'vstack', (['(si, -ei, sr, pe, -ea, -r)'], {}), '((si, -ei, sr, pe, -ea, -r))\n', (8687, 8715), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, 
vstack\n'), ((12227, 12249), 'numpy.exp', 'exp', (['(20.0 * s[t] - 1.0)'], {}), '(20.0 * s[t] - 1.0)\n', (12230, 12249), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((12420, 12439), 'numpy.exp', 'exp', (['(-3 * s[t] / sr)'], {}), '(-3 * s[t] / sr)\n', (12423, 12439), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((13080, 13104), 'numpy.vstack', 'vstack', (['(s, pe, -ea, -r)'], {}), '((s, pe, -ea, -r))\n', (13086, 13104), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n'), ((12317, 12340), 'numpy.exp', 'exp', (['(19.0 - 20.0 * s[t])'], {}), '(19.0 - 20.0 * s[t])\n', (12320, 12340), False, 'from numpy import add, float64, multiply, exp, zeros, nan_to_num, vstack\n')] |
import numpy as np
from point import Point
from vertex import Vertex
from cluster import Cluster
from edge import Edge
from corrolation import Corroloation
import tensorflow as tf
import scipy
import queue
"""
"""
class Network:
def __init__(self, task_queue = None, result_queue = None, num_points=1000, num_dims=3, corrolation_function = np.corrcoef):
self.edges = []
self.vertices = []
self.points = []
self.clusters = []
self.num_points = num_pointss
self.num_dims = num_dims
self.corrolation_function = corrolation_function
self.task_queue = task_queue
self.result_queue = result_queue
self.vert_matrix = None
self.generate_points()
def generate_points(self):
for i in range(self.num_points):
self.points.append(Point(self.num_dims, np.random.random_integers(100, size=(self.num_dims,))))
self.vertices.append(Vertex())
def compute_corrolations(self):
self.vert_matrix = np.ndarray(shape=(len(self.points), len(self.points)))
tmp_holder = []
for point in self.points:
self.task_queue.put(
Corroloation(
self.corrolation_function,
point,
self.points
)
)
for x in range(len(self.points)):
tmp_holder.append(self.result_queue.get())
counter = 0
for x in self.find_total_ordering(tmp_holder):
self.vert_matrix[counter] = x[:][-1]
def find_total_ordering(self, unsorted_array: []) -> []:
final_ordering = []
best_start_tensor = unsorted_array[0]
for tensor in unsorted_array:
if (np.sum(tensor[:][-1]) > np.sum(best_start_tensor[:][-1])):
best_start_tensor = tensor
# append initial start tensor
final_ordering.append(best_start_tensor)
past_tensor = best_start_tensor
# loop through and append the next best until empty
# this greedy style should take under n^2 time
for x in range(len(unsorted_array)):
# take the best of the rest
next_tensor = past_tensor.index(np.max(set.difference(set(unsorted_array), set(final_ordering))))
final_ordering.append(next_tensor)
past_tensor = next_tensor
return final_ordering
def find_clusters(self):
q = queue.Queue()
first_cluster = Cluster()
first_cluster.edges = self.edges
first_cluster.vertices = self.vertices
first_cluster.build_array()
self.clusters.append(first_cluster)
q.put(first_cluster)
while not q.empty():
cluster = q.get()
if cluster.should_cluster_be_split():
new_cluster = Cluster()
cluster.split_cluster(new_cluster)
self.clusters.append(new_cluster)
if not cluster.verticies.__len__() > 0:
q.put(cluster)
if new_cluster.verticies.__len__() > 0:
q.put(new_cluster)
| [
"numpy.random.random_integers",
"numpy.sum",
"corrolation.Corroloation",
"cluster.Cluster",
"vertex.Vertex",
"queue.Queue"
] | [((2449, 2462), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (2460, 2462), False, 'import queue\n'), ((2487, 2496), 'cluster.Cluster', 'Cluster', ([], {}), '()\n', (2494, 2496), False, 'from cluster import Cluster\n'), ((954, 962), 'vertex.Vertex', 'Vertex', ([], {}), '()\n', (960, 962), False, 'from vertex import Vertex\n'), ((1190, 1249), 'corrolation.Corroloation', 'Corroloation', (['self.corrolation_function', 'point', 'self.points'], {}), '(self.corrolation_function, point, self.points)\n', (1202, 1249), False, 'from corrolation import Corroloation\n'), ((1753, 1774), 'numpy.sum', 'np.sum', (['tensor[:][-1]'], {}), '(tensor[:][-1])\n', (1759, 1774), True, 'import numpy as np\n'), ((1777, 1809), 'numpy.sum', 'np.sum', (['best_start_tensor[:][-1]'], {}), '(best_start_tensor[:][-1])\n', (1783, 1809), True, 'import numpy as np\n'), ((2833, 2842), 'cluster.Cluster', 'Cluster', ([], {}), '()\n', (2840, 2842), False, 'from cluster import Cluster\n'), ((865, 918), 'numpy.random.random_integers', 'np.random.random_integers', (['(100)'], {'size': '(self.num_dims,)'}), '(100, size=(self.num_dims,))\n', (890, 918), True, 'import numpy as np\n')] |
import hashlib
import json
from typing import List, Set
import numpy as np
import pandas as pd
import torch
from scipy import sparse as sp
import src.config as cfg
class ProductEncoderMini:
def __init__(self, top_products):
self._all_pids = top_products
self.idx = {}
self.pid = {}
for idx, pid in enumerate(top_products):
self.idx[pid] = idx
self.pid[idx] = pid
def isAllowed(self, pid):
return pid in self.idx
def filter(self, seq):
return [x for x in seq if self.isAllowed(x)]
def toIdx(self, x):
if type(x) == str:
pid = x
return self.idx[pid]
return [self.idx[pid] for pid in x]
def toIdxWithFilter(self, x):
if type(x) == str:
pid = x
return self.idx[pid]
return [self.idx[pid] for pid in x if self.isAllowed(pid)]
def toPid(self, x):
if type(x) == int:
idx = x
return self.pid[idx]
return [self.pid[idx] for idx in x]
@property
def num_products(self):
return len(self.idx)
class TrainingSampleMini:
def __init__(self, history: List[str], target_items: Set[str], row=None, client_id: str = None):
self.history = history
self.row = row
self.target_items = target_items
self.client_id = client_id
def squeeze_history(transaction_history):
items = []
for trans in transaction_history:
items.extend([i["product_id"] for i in trans["products"]])
return items
def make_coo_row_mini(transaction_history, product_encoder: ProductEncoderMini):
idx = []
values = []
items = []
for trans in transaction_history:
items.extend([i["product_id"] for i in trans["products"]])
items = [x for x in items if product_encoder.isAllowed(x)]
n_items = len(items)
for pid in items:
idx.append(product_encoder.toIdx(pid))
values.append(1.0 / n_items)
return sp.coo_matrix(
(np.array(values).astype(np.float32), ([0] * len(idx), idx)), shape=(1, product_encoder.num_products),
)
| [
"numpy.array"
] | [((2026, 2042), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (2034, 2042), True, 'import numpy as np\n')] |
'''
based on https://github.com/asmith26/wide_resnets_keras
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import os
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
sys.stdout = sys.stderr
# Prevent reaching to maximum recursion depth in `theano.tensor.grad`
sys.setrecursionlimit(2 ** 20)
import numpy as np
np.random.seed(2 ** 10)
from keras.datasets import cifar10
from keras.models import Model
from keras.layers import Input, Activation, merge, Dense, Flatten, Dropout
from keras.layers.convolutional import Convolution2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.regularizers import l2
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras import backend as K
# ================================================
# DATA CONFIGURATION:
logging.debug("Loading data...")
nb_classes = 10
image_size = 32
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# ================================================
# ================================================
# NETWORK/TRAINING CONFIGURATION:
logging.debug("Loading network/training configuration...")
depth = 16
k = 4
dropout_probability = 0.3 # 0.3 for cifar10 and 0.4 for svhn
weight_decay = 0.0005 # page 10: "Used in all experiments"
batch_size = 128 # page 8: "Used in all experiments"
# Regarding nb_epochs, lr_schedule and sgd, see bottom page 10:
nb_epochs = 200
lr_schedule = [60, 120, 160] # epoch_step
def schedule(epoch_idx):
if (epoch_idx + 1) < lr_schedule[0]:
return 0.1
elif (epoch_idx + 1) < lr_schedule[1]:
return 0.02 # lr_decay_ratio = 0.2
elif (epoch_idx + 1) < lr_schedule[2]:
return 0.004
return 0.0008
sgd = SGD(lr=0.1, momentum=0.9, nesterov=True)
# Other config from code; throughtout all layer:
use_bias = False # following functions 'FCinit(model)' and 'DisableBias(model)' in utils.lua
weight_init="he_normal" # follows the 'MSRinit(model)' function in utils.lua
# Keras specific
if K.image_dim_ordering() == "th":
logging.debug("image_dim_ordering = 'th'")
channel_axis = 1
input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
channel_axis = -1
input_shape = (image_size, image_size, 3)
# ================================================
# ================================================
# OUTPUT CONFIGURATION:
print_model_summary = True
save_model_and_weights = True
save_model_plot = False
MODEL_PATH = os.environ.get('MODEL_PATH', 'models/')
CHECKPOINT_PATH = os.environ.get('CHECKPOINT_PATH', 'checkpoints/')
# ================================================
# Wide residual network http://arxiv.org/abs/1605.07146
def _wide_basic(n_input_plane, n_output_plane, stride):
def f(net):
# format of conv_params:
# [ [nb_col="kernel width", nb_row="kernel height",
# subsample="(stride_vertical,stride_horizontal)",
# border_mode="same" or "valid"] ]
# B(3,3): orignal <<basic>> block
conv_params = [ [3,3,stride,"same"],
[3,3,(1,1),"same"] ]
n_bottleneck_plane = n_output_plane
# Residual block
for i, v in enumerate(conv_params):
if i == 0:
if n_input_plane != n_output_plane:
net = BatchNormalization(axis=channel_axis)(net)
net = Activation("relu")(net)
convs = net
else:
convs = BatchNormalization(axis=channel_axis)(net)
convs = Activation("relu")(convs)
convs = Convolution2D(n_bottleneck_plane, nb_col=v[0], nb_row=v[1],
subsample=v[2],
border_mode=v[3],
init=weight_init,
W_regularizer=l2(weight_decay),
bias=use_bias)(convs)
else:
convs = BatchNormalization(axis=channel_axis)(convs)
convs = Activation("relu")(convs)
if dropout_probability > 0:
convs = Dropout(dropout_probability)(convs)
convs = Convolution2D(n_bottleneck_plane, nb_col=v[0], nb_row=v[1],
subsample=v[2],
border_mode=v[3],
init=weight_init,
W_regularizer=l2(weight_decay),
bias=use_bias)(convs)
# Shortcut Conntection: identity function or 1x1 convolutional
# (depends on difference between input & output shape - this
# corresponds to whether we are using the first block in each
# group; see _layer() ).
if n_input_plane != n_output_plane:
shortcut = Convolution2D(n_output_plane, nb_col=1, nb_row=1,
subsample=stride,
border_mode="same",
init=weight_init,
W_regularizer=l2(weight_decay),
bias=use_bias)(net)
else:
shortcut = net
return merge([convs, shortcut], mode="sum")
return f
# "Stacking Residual Units on the same stage"
def _layer(block, n_input_plane, n_output_plane, count, stride):
def f(net):
net = block(n_input_plane, n_output_plane, stride)(net)
for i in range(2,int(count+1)):
net = block(n_output_plane, n_output_plane, stride=(1,1))(net)
return net
return f
def create_model():
logging.debug("Creating model...")
assert((depth - 4) % 6 == 0)
n = (depth - 4) / 6
inputs = Input(shape=input_shape)
n_stages=[16, 16*k, 32*k, 64*k]
conv1 = Convolution2D(nb_filter=n_stages[0], nb_row=3, nb_col=3,
subsample=(1, 1),
border_mode="same",
init=weight_init,
W_regularizer=l2(weight_decay),
bias=use_bias)(inputs) # "One conv at the beginning (spatial size: 32x32)"
# Add wide residual blocks
block_fn = _wide_basic
conv2 = _layer(block_fn, n_input_plane=n_stages[0], n_output_plane=n_stages[1], count=n, stride=(1,1))(conv1)# "Stage 1 (spatial size: 32x32)"
conv3 = _layer(block_fn, n_input_plane=n_stages[1], n_output_plane=n_stages[2], count=n, stride=(2,2))(conv2)# "Stage 2 (spatial size: 16x16)"
conv4 = _layer(block_fn, n_input_plane=n_stages[2], n_output_plane=n_stages[3], count=n, stride=(2,2))(conv3)# "Stage 3 (spatial size: 8x8)"
batch_norm = BatchNormalization(axis=channel_axis)(conv4)
relu = Activation("relu")(batch_norm)
# Classifier block
pool = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), border_mode="same")(relu)
flatten = Flatten()(pool)
predictions = Dense(output_dim=nb_classes, init=weight_init, bias=use_bias,
W_regularizer=l2(weight_decay), activation="softmax")(flatten)
model = Model(input=inputs, output=predictions)
return model
if __name__ == '__main__':
model = create_model()
model.summary()
json_string = model.to_json()
with open("wideresnet_16_4.json", "w")as jsonf:
jsonf.write(json_string)
jsonf.close()
# model.compile(optimizer=sgd, loss="categorical_crossentropy", metrics=['accuracy'])
# if print_model_summary:
# logging.debug("Model summary...")
# model.count_params()
# model.summary()
# if save_model_plot:
# logging.debug("Saving model plot...")
# mk_dir(MODEL_PATH)
# from keras.utils.visualize_util import plot
# plot(model, to_file=os.path.join(MODEL_PATH, 'WRN-{0}-{1}.png'.format(depth, k)), show_shapes=True)
# # Data Augmentation based on page 6 (see README for full details)
# logging.debug("Creating ImageDataGenerators...")
# train_datagen = ImageDataGenerator(
# featurewise_center=True,
# featurewise_std_normalization=True,
# zca_whitening=True,
# horizontal_flip=True)
# train_datagen.fit(X_train, augment=True, rounds=2)
# test_datagen = ImageDataGenerator(
# featurewise_center=True,
# featurewise_std_normalization=True,
# zca_whitening=True)
# test_datagen.fit(X_train)
# mk_dir(CHECKPOINT_PATH)
# callbacks = [ LearningRateScheduler(schedule=schedule),
# ModelCheckpoint(CHECKPOINT_PATH+'/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
# monitor='val_loss',
# verbose=1,
# save_best_only=True,
# mode='auto')
# ]
# logging.debug("Running training...")
# # fit the model on the batches generated by train_datagen.flow()
# model.fit_generator(train_datagen.flow(X_train, Y_train, batch_size=batch_size, shuffle=True),
# samples_per_epoch=X_train.shape[0],
# nb_epoch=nb_epochs,
# validation_data=test_datagen.flow(X_test, Y_test, batch_size=batch_size),
# nb_val_samples=X_test.shape[0],
# callbacks=callbacks)
# if save_model_and_weights:
# logging.debug("Saving model...")
# mk_dir(MODEL_PATH)
# with open( os.path.join(MODEL_PATH, 'WRN-{0}-{1}.json'.format(depth, k)), 'w') as f:
# f.write(model.to_json())
# model.save_weights( os.path.join(MODEL_PATH, 'WRN-{0}-{1}.h5'.format(depth, k)), overwrite=True) | [
"keras.regularizers.l2",
"keras.layers.convolutional.AveragePooling2D",
"numpy.random.seed",
"keras.datasets.cifar10.load_data",
"logging.debug",
"logging.basicConfig",
"keras.optimizers.SGD",
"keras.layers.Activation",
"keras.layers.Dropout",
"keras.layers.Flatten",
"keras.models.Model",
"os.... | [((228, 268), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (247, 268), False, 'import logging\n'), ((375, 405), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(2 ** 20)'], {}), '(2 ** 20)\n', (396, 405), False, 'import sys\n'), ((426, 449), 'numpy.random.seed', 'np.random.seed', (['(2 ** 10)'], {}), '(2 ** 10)\n', (440, 449), True, 'import numpy as np\n'), ((1051, 1083), 'logging.debug', 'logging.debug', (['"""Loading data..."""'], {}), "('Loading data...')\n", (1064, 1083), False, 'import logging\n'), ((1157, 1176), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (1174, 1176), False, 'from keras.datasets import cifar10\n'), ((1307, 1351), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', 'nb_classes'], {}), '(y_train, nb_classes)\n', (1330, 1351), False, 'from keras.utils import np_utils\n'), ((1361, 1404), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', 'nb_classes'], {}), '(y_test, nb_classes)\n', (1384, 1404), False, 'from keras.utils import np_utils\n'), ((1542, 1600), 'logging.debug', 'logging.debug', (['"""Loading network/training configuration..."""'], {}), "('Loading network/training configuration...')\n", (1555, 1600), False, 'import logging\n'), ((2215, 2255), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.1)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.1, momentum=0.9, nesterov=True)\n', (2218, 2255), False, 'from keras.optimizers import SGD\n'), ((2993, 3032), 'os.environ.get', 'os.environ.get', (['"""MODEL_PATH"""', '"""models/"""'], {}), "('MODEL_PATH', 'models/')\n", (3007, 3032), False, 'import os\n'), ((3051, 3100), 'os.environ.get', 'os.environ.get', (['"""CHECKPOINT_PATH"""', '"""checkpoints/"""'], {}), "('CHECKPOINT_PATH', 'checkpoints/')\n", (3065, 3100), False, 'import os\n'), ((2504, 2526), 'keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', 
(2524, 2526), True, 'from keras import backend as K\n'), ((2540, 2582), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'th\'"""'], {}), '("image_dim_ordering = \'th\'")\n', (2553, 2582), False, 'import logging\n'), ((2660, 2702), 'logging.debug', 'logging.debug', (['"""image_dim_ordering = \'tf\'"""'], {}), '("image_dim_ordering = \'tf\'")\n', (2673, 2702), False, 'import logging\n'), ((6283, 6317), 'logging.debug', 'logging.debug', (['"""Creating model..."""'], {}), "('Creating model...')\n", (6296, 6317), False, 'import logging\n'), ((6398, 6422), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (6403, 6422), False, 'from keras.layers import Input, Activation, merge, Dense, Flatten, Dropout\n'), ((7793, 7832), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'predictions'}), '(input=inputs, output=predictions)\n', (7798, 7832), False, 'from keras.models import Model\n'), ((5857, 5893), 'keras.layers.merge', 'merge', (['[convs, shortcut]'], {'mode': '"""sum"""'}), "([convs, shortcut], mode='sum')\n", (5862, 5893), False, 'from keras.layers import Input, Activation, merge, Dense, Flatten, Dropout\n'), ((7340, 7377), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'channel_axis'}), '(axis=channel_axis)\n', (7358, 7377), False, 'from keras.layers.normalization import BatchNormalization\n'), ((7396, 7414), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7406, 7414), False, 'from keras.layers import Input, Activation, merge, Dense, Flatten, Dropout\n'), ((7506, 7576), 'keras.layers.convolutional.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(8, 8)', 'strides': '(1, 1)', 'border_mode': '"""same"""'}), "(pool_size=(8, 8), strides=(1, 1), border_mode='same')\n", (7522, 7576), False, 'from keras.layers.convolutional import Convolution2D, AveragePooling2D\n'), ((7597, 7606), 'keras.layers.Flatten', 'Flatten', ([], {}), 
'()\n', (7604, 7606), False, 'from keras.layers import Input, Activation, merge, Dense, Flatten, Dropout\n'), ((6705, 6721), 'keras.regularizers.l2', 'l2', (['weight_decay'], {}), '(weight_decay)\n', (6707, 6721), False, 'from keras.regularizers import l2\n'), ((7731, 7747), 'keras.regularizers.l2', 'l2', (['weight_decay'], {}), '(weight_decay)\n', (7733, 7747), False, 'from keras.regularizers import l2\n'), ((4564, 4601), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'channel_axis'}), '(axis=channel_axis)\n', (4582, 4601), False, 'from keras.layers.normalization import BatchNormalization\n'), ((4633, 4651), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4643, 4651), False, 'from keras.layers import Input, Activation, merge, Dense, Flatten, Dropout\n'), ((3875, 3912), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'channel_axis'}), '(axis=channel_axis)\n', (3893, 3912), False, 'from keras.layers.normalization import BatchNormalization\n'), ((3944, 3962), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3954, 3962), False, 'from keras.layers import Input, Activation, merge, Dense, Flatten, Dropout\n'), ((4050, 4087), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {'axis': 'channel_axis'}), '(axis=channel_axis)\n', (4068, 4087), False, 'from keras.layers.normalization import BatchNormalization\n'), ((4121, 4139), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4131, 4139), False, 'from keras.layers import Input, Activation, merge, Dense, Flatten, Dropout\n'), ((4730, 4758), 'keras.layers.Dropout', 'Dropout', (['dropout_probability'], {}), '(dropout_probability)\n', (4737, 4758), False, 'from keras.layers import Input, Activation, merge, Dense, Flatten, Dropout\n'), ((5725, 5741), 'keras.regularizers.l2', 'l2', (['weight_decay'], {}), '(weight_decay)\n', (5727, 5741), False, 
'from keras.regularizers import l2\n'), ((4445, 4461), 'keras.regularizers.l2', 'l2', (['weight_decay'], {}), '(weight_decay)\n', (4447, 4461), False, 'from keras.regularizers import l2\n'), ((5064, 5080), 'keras.regularizers.l2', 'l2', (['weight_decay'], {}), '(weight_decay)\n', (5066, 5080), False, 'from keras.regularizers import l2\n')] |
#!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
"""Extracts the embedding from a model trained by lstm_lm.py."""
from __future__ import absolute_import, division, print_function
from argparse import ArgumentParser
import os
import numpy as np
import tensorflow as tf
from emLam.utils import openall
def parse_arguments():
parser = ArgumentParser(
description='Extracts the embedding from a model trained by lstm_lm.py.')
parser.add_argument('vocab_file',
help='the vocabulary file.')
parser.add_argument('output_file',
help='the output file, to which the embedding is saved.')
parser.add_argument('--model-name', '-m', default='RNN CLM',
help='the name of the model [RNN CLM].')
return parser.parse_args()
def read_vocab(vocab_file):
with openall(vocab_file) as inf:
return [line.split('\t')[0] for line in inf]
def main():
args = parse_arguments()
vocab = read_vocab(args.vocab_file)
with tf.Session() as session:
save_dir = os.path.join('saves', args.model_name)
checkpoint_path = tf.train.latest_checkpoint(save_dir)
if checkpoint_path:
saver = tf.train.import_meta_graph('{}.meta'.format(checkpoint_path))
saver.restore(session, checkpoint_path)
else:
raise ValueError('No saved model exists.')
# TODO change to GLOBAL_VARIABLES in 0.12+
embedding = tf.get_collection(tf.GraphKeys.VARIABLES,
scope='Model/embedding:0')[0]
em = session.run(embedding)
np.savez(args.output_file + '.npz', vocab=vocab, embedding=em)
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"tensorflow.get_collection",
"tensorflow.Session",
"tensorflow.train.latest_checkpoint",
"emLam.utils.openall",
"numpy.savez",
"os.path.join"
] | [((348, 441), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Extracts the embedding from a model trained by lstm_lm.py."""'}), "(description=\n 'Extracts the embedding from a model trained by lstm_lm.py.')\n", (362, 441), False, 'from argparse import ArgumentParser\n'), ((858, 877), 'emLam.utils.openall', 'openall', (['vocab_file'], {}), '(vocab_file)\n', (865, 877), False, 'from emLam.utils import openall\n'), ((1032, 1044), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1042, 1044), True, 'import tensorflow as tf\n'), ((1076, 1114), 'os.path.join', 'os.path.join', (['"""saves"""', 'args.model_name'], {}), "('saves', args.model_name)\n", (1088, 1114), False, 'import os\n'), ((1141, 1177), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['save_dir'], {}), '(save_dir)\n', (1167, 1177), True, 'import tensorflow as tf\n'), ((1634, 1696), 'numpy.savez', 'np.savez', (["(args.output_file + '.npz')"], {'vocab': 'vocab', 'embedding': 'em'}), "(args.output_file + '.npz', vocab=vocab, embedding=em)\n", (1642, 1696), True, 'import numpy as np\n'), ((1480, 1548), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.VARIABLES'], {'scope': '"""Model/embedding:0"""'}), "(tf.GraphKeys.VARIABLES, scope='Model/embedding:0')\n", (1497, 1548), True, 'import tensorflow as tf\n')] |
import glm
import numpy
class OBJ:
def __init__(self, file):
self.vertices = []
self.textures = []
self.normals = []
self.indices = []
with open('model\\data\\models\\'+file, 'r') as f:
for l in f:
l = l.strip('\n')
ll = l.split(' ')
if ll[0] == 'v':
vert = glm.vec3(float(ll[1]), float(ll[2]), float(ll[3]))
self.vertices.append(vert)
elif ll[0] == 'vt':
tex = glm.vec2(float(ll[1]), float(ll[2]))
self.textures.append(tex)
elif ll[0] == 'vn':
norm = glm.vec3(float(ll[1]), float(ll[2]), float(ll[3]))
self.normals.append(norm)
elif ll[0] == 's':
self.texturesArray = [None] * len(self.vertices) * 2
self.normalsArray = [None] * len(self.vertices) * 3
break
for l in f:
l = l.strip('\n')
ll = l.split(' ')
if ll[0] == 'f':
for e in ll[1:]:
vertexData = e.split('/')
self.processVertex(vertexData, self.indices, self.textures, self.normals, self.texturesArray, self.normalsArray)
self.vertexArray = [None] * len(self.vertices) * 3
#self.indicesArray = [None] * len(self.indices)
#print(f'after {len(self.indices)}')
i = 0
for e in self.vertices:
self.vertexArray[i] = e.x
i = i + 1
self.vertexArray[i] = e.y
i = i + 1
self.vertexArray[i] = e.z
i = i + 1
self.indicesArray = self.indices
self.vertexArray = numpy.array(self.vertexArray, dtype=numpy.float32)
self.normalsArray = numpy.array(self.normalsArray, dtype=numpy.float32)
self.texturesArray = numpy.array(self.texturesArray, dtype=numpy.float32)
self.indicesArray = numpy.array(self.indicesArray, dtype=numpy.int32)
self.model = {'vertex':self.vertexArray, 'normal':self.normalsArray, 'texture':self.texturesArray, 'index':self.indicesArray, 'count':len(self.indicesArray)}
def processVertex(self, vertexData, indices, textures, normals, tArray, nArray):
currentVertexPointer = int(vertexData[0])-1
#print(vertexData)
indices.append(currentVertexPointer)
if vertexData[1] != '':
c_texture = glm.vec2(textures[int(vertexData[1])-1])
tArray[currentVertexPointer*2] = c_texture.x
tArray[currentVertexPointer*2+1] = c_texture.y
if vertexData[2] != '':
c_normal = glm.vec3(normals[int(vertexData[2])-1])
nArray[currentVertexPointer*3] = c_normal.x
nArray[currentVertexPointer*3+1] = c_normal.y
nArray[currentVertexPointer*3+2] = c_normal.z
def loadObj(self):
return self.model | [
"numpy.array"
] | [((1906, 1956), 'numpy.array', 'numpy.array', (['self.vertexArray'], {'dtype': 'numpy.float32'}), '(self.vertexArray, dtype=numpy.float32)\n', (1917, 1956), False, 'import numpy\n'), ((1985, 2036), 'numpy.array', 'numpy.array', (['self.normalsArray'], {'dtype': 'numpy.float32'}), '(self.normalsArray, dtype=numpy.float32)\n', (1996, 2036), False, 'import numpy\n'), ((2066, 2118), 'numpy.array', 'numpy.array', (['self.texturesArray'], {'dtype': 'numpy.float32'}), '(self.texturesArray, dtype=numpy.float32)\n', (2077, 2118), False, 'import numpy\n'), ((2147, 2196), 'numpy.array', 'numpy.array', (['self.indicesArray'], {'dtype': 'numpy.int32'}), '(self.indicesArray, dtype=numpy.int32)\n', (2158, 2196), False, 'import numpy\n')] |
import os
import numpy as np
from mpunet.callbacks import init_callback_objects
from mpunet.logging import ScreenLogger
from mpunet.callbacks import (SavePredictionImages, Validation,
FGBatchBalancer, DividerLine,
LearningCurve, MemoryConsumption,
MeanReduceLogArrays, remove_validation_callbacks)
from mpunet.utils import ensure_list_or_tuple
from mpunet.train.utils import (ensure_sparse,
init_losses,
init_metrics,
init_optimizer)
from multiprocessing import cpu_count
from tensorflow.python.framework.errors_impl import (ResourceExhaustedError,
InternalError)
def get_steps(sequence, im_per_epoch=None):
    """ Returns the number of gradient steps to take in an epoch """
    if not im_per_epoch:
        # No cap given: one step per batch in the sequence
        return len(sequence)
    # Cap the epoch at im_per_epoch images, rounding up to whole batches
    return int(np.ceil(im_per_epoch / sequence.batch_size))
class Trainer(object):
    """
    Handles initialization and logging of model fitting sessions.
    """
    def __init__(self, model, logger=None):
        """
        Init. simply accepts a model and stores it.
        Optionally, an 'org_model' (original model) may be passed and stored
        as well. This is for training multi-GPU models prepared by the
        tf.keras.utils.multi_gpu_model utility, which returns a new, split
        model for training (passed as 'model' parameter here). For properly
        saving the model parameter, however, it is recommended to use the
        original, non-split model (here passed as 'org_model').
        Args:
            model: (tf.keras Model) Initialized model to train
            org_model: (tf.keras Model) Optional single-GPU version for the
                       passed 'model' parameter.
            logger: (Logger) Optional Logger instance
        """
        self.model = model
        self.logger = logger if logger is not None else ScreenLogger()

    def compile_model(self, optimizer, loss, metrics, reduction,
                      check_sparse=False, optimizer_kwargs=None, loss_kwargs=None,
                      **kwargs):
        """
        Compile the stored tf.keras Model instance stored in self.model
        Sets the loss function, optimizer and metrics
        Args:
            optimizer:  (string) The name of a tf.keras.optimizers Optimizer
            optimizer_kwargs: (dict) Key-word arguments passed to the Optimizer
            loss:       (string) The name of a tf.keras.losses or
                                 MultiPlanarUnet loss function
            metrics:    (list)   List of tf.keras.metrics or
                                 mpunet metrics.
            reduction:  TODO
            check_sparse: TODO
            **kwargs:   (dict)   Key-word arguments passed to losses
                                 and/or metrics that accept such.
        """
        # Defaults are None (not {}) to avoid the shared mutable-default
        # argument pitfall; normalize to fresh dicts here.
        optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs
        loss_kwargs = {} if loss_kwargs is None else loss_kwargs
        # Make sure sparse metrics and loss are specified as sparse
        metrics = ensure_list_or_tuple(metrics)
        losses = ensure_list_or_tuple(loss)
        if check_sparse:
            ensure_sparse(metrics+losses)
        # Initialize optimizer, loss(es) and metric(s) from tf.keras or
        # mpunet
        optimizer = init_optimizer(optimizer, self.logger, **optimizer_kwargs)
        losses = init_losses(losses, self.logger, **kwargs)
        for i, loss in enumerate(losses):
            try:
                losses[i] = loss(reduction=reduction,
                                 **loss_kwargs)
            except (ValueError, TypeError):
                raise TypeError("All loss functions must currently be "
                                "callable and accept the 'reduction' "
                                "parameter specifying a "
                                "tf.keras.losses.Reduction type. If you "
                                "specified a keras loss function such as "
                                "'sparse_categorical_crossentropy', change "
                                "this to its corresponding loss class "
                                "'SparseCategoricalCrossentropy'. If "
                                "you implemented a custom loss function, "
                                "please raise an issue on GitHub.")
        metrics = init_metrics(metrics, self.logger, **kwargs)
        # Compile the model
        self.model.compile(optimizer=optimizer, loss=losses, metrics=metrics)
        self.logger("Optimizer: %s" % optimizer)
        self.logger("Loss funcs: %s" % losses)
        # BUGFIX: previously logged the 'init_metrics' function object
        # instead of the initialized metrics list.
        self.logger("Metrics: %s" % metrics)
        return self

    def fit(self, train, val, batch_size, no_im=False, **fit_kwargs):
        """
        Fit the stored tf.keras Model (self.model) on a set of data.
        The 'fit' method is a wrapper around the hidden '_fit' method. It
        handles KeyboardInterrupts (--> stopping training prematurely), TF
        GPU memory errors (--> batch_size is reduced by 2 and training
        restarted), and other exceptions (--> error logged and training
        terminated).
        Please refer to the self._fit method for 'fit_kwargs' argument details.
        Args:
            train: TODO
            val: TODO
            batch_size: (int) The initial batch size to run training with
            no_im: TODO
            fit_kwargs: (dict) Keyword arguments passed to self._fit
        """
        # Crop labels?
        if hasattr(self.model, "label_crop"):
            train.label_crop = self.model.label_crop
            val.label_crop = self.model.label_crop
        if type(train).__name__ == "MultiTaskSequence":
            self.logger("-- Skipping saving images (not yet implemented for"
                        " MultiTaskSequences).")
            no_im = True
        # Save a few images to disk for inspection
        if no_im:
            self.logger("No images saved (--no_images flag is set)")
        else:
            from mpunet.utils.plotting import save_images
            im_path = os.path.join(self.logger.base_path, "images")
            save_images(train, val, im_path, self.logger)
        # Start fitting; on GPU OOM, retry with a smaller batch size
        fitting = True
        while fitting:
            try:
                self._fit(train=train,
                          val=val,
                          batch_size=batch_size,
                          no_im=no_im,
                          **fit_kwargs)
                fitting = False
            except (ResourceExhaustedError, InternalError):
                # Reduce batch size
                batch_size -= 2
                self.logger("\n\n[MEMORY ERROR] Reducing batch size "
                            "by 2 (now %i)" % batch_size)
                if batch_size < 1:
                    self.logger("[ERROR] Batch size negative or zero!")
                    fitting = False
            except KeyboardInterrupt:
                fitting = False
            except Exception as e:
                self.logger(e)
                raise e
        try:
            # Stop the image loading queues, if any
            if train.image_pair_loader.queue:
                train.image_pair_loader.queue.stop()
            if val.image_pair_loader.queue:
                val.image_pair_loader.queue.stop()
        except AttributeError:
            # Multi-tasking, train.image_pair_loader will be a list
            # TODO: Make all sequences store a reference to the queue
            pass
        self.logger("Training stopped.")
        self.logger.print_calling_method = True
        return self.model

    def _fit(self,
             train,
             val,
             batch_size,
             n_epochs,
             callbacks,
             train_im_per_epoch,
             val_im_per_epoch,
             val_ignore_class_zero=True,
             no_im=False,
             verbose=1,
             init_epoch=0,
             use_multiprocessing=False,
             **unused):
        """
        Run the actual tf.keras fit loop. Sets up validation/plotting
        callbacks and the per-epoch step counts, then calls self.model.fit.
        Raises tf memory errors to the caller (handled in self.fit).
        """
        train.batch_size = batch_size
        # Get number of steps per train epoch
        train_steps = get_steps(train, train_im_per_epoch)
        self.logger("Using %i steps per train epoch (total batches=%i)" %
                    (train_steps, len(train)))
        if val is None:
            # No validation to be performed, remove callbacks that might need
            # validation data to function properly
            remove_validation_callbacks(callbacks, self.logger)
        else:
            val.batch_size = batch_size
            val_steps = get_steps(val, val_im_per_epoch)
            self.logger("Using %i steps per val epoch (total batches=%i)" %
                        (val_steps, len(val)))
            # Add validation callback
            # Important: Should be first in callbacks list as other CBs may
            # depend on the validation metrics/loss
            validation = Validation(val,
                                    steps=val_steps,
                                    ignore_class_zero=val_ignore_class_zero,
                                    logger=self.logger,
                                    verbose=verbose)
            callbacks = [validation] + callbacks
        # Add various callbacks for plotting learning curves etc.
        # Get FGBatchBalancer callbacks, etc.
        if hasattr(train, "n_fg_slices"):
            callbacks.append(FGBatchBalancer(train, logger=self.logger))
        if not no_im:
            # Add save images cb
            callbacks.append(SavePredictionImages(train, val))
        callbacks.insert(1, MeanReduceLogArrays())
        # callbacks.insert(1, MemoryConsumption(logger=self.logger))
        callbacks.append(LearningCurve(logger=self.logger))
        callbacks.append(DividerLine(self.logger))
        # Get initialized callback objects
        callbacks, cb_dict = init_callback_objects(callbacks, self.logger)
        # If ModelCheckPointClean is used, set the original model to store
        # the correct weights when using multi-GPU models
        cb = cb_dict.get("ModelCheckPointClean")
        if cb:
            cb.org_model = self.model  # TEMP TODO
        # Temporary memory leak fix: wrap the sequence in a tf.data.Dataset
        import tensorflow as tf
        dtypes, shapes = list(zip(*map(lambda x: (x.dtype, x.shape), train[0])))
        train = tf.data.Dataset.from_generator(train, dtypes, shapes)
        # Fit the model
        # is_queued = bool(train.image_pair_loader.queue)
        self.logger.active_log_file = "training"
        self.logger.print_calling_method = False
        self.model.fit(
            train,
            steps_per_epoch=train_steps,
            epochs=n_epochs,
            callbacks=callbacks,
            initial_epoch=init_epoch,
            use_multiprocessing=use_multiprocessing,
            workers=5,
            max_queue_size=5,
            shuffle=False,  # Determined by the chosen Sequence class
            verbose=verbose
        )
| [
"numpy.ceil",
"mpunet.train.utils.init_metrics",
"mpunet.logging.ScreenLogger",
"mpunet.callbacks.remove_validation_callbacks",
"mpunet.callbacks.LearningCurve",
"mpunet.utils.plotting.save_images",
"mpunet.callbacks.FGBatchBalancer",
"mpunet.callbacks.DividerLine",
"mpunet.train.utils.init_optimize... | [((3190, 3219), 'mpunet.utils.ensure_list_or_tuple', 'ensure_list_or_tuple', (['metrics'], {}), '(metrics)\n', (3210, 3219), False, 'from mpunet.utils import ensure_list_or_tuple\n'), ((3237, 3263), 'mpunet.utils.ensure_list_or_tuple', 'ensure_list_or_tuple', (['loss'], {}), '(loss)\n', (3257, 3263), False, 'from mpunet.utils import ensure_list_or_tuple\n'), ((3441, 3499), 'mpunet.train.utils.init_optimizer', 'init_optimizer', (['optimizer', 'self.logger'], {}), '(optimizer, self.logger, **optimizer_kwargs)\n', (3455, 3499), False, 'from mpunet.train.utils import ensure_sparse, init_losses, init_metrics, init_optimizer\n'), ((3517, 3559), 'mpunet.train.utils.init_losses', 'init_losses', (['losses', 'self.logger'], {}), '(losses, self.logger, **kwargs)\n', (3528, 3559), False, 'from mpunet.train.utils import ensure_sparse, init_losses, init_metrics, init_optimizer\n'), ((4496, 4540), 'mpunet.train.utils.init_metrics', 'init_metrics', (['metrics', 'self.logger'], {}), '(metrics, self.logger, **kwargs)\n', (4508, 4540), False, 'from mpunet.train.utils import ensure_sparse, init_losses, init_metrics, init_optimizer\n'), ((9971, 10016), 'mpunet.callbacks.init_callback_objects', 'init_callback_objects', (['callbacks', 'self.logger'], {}), '(callbacks, self.logger)\n', (9992, 10016), False, 'from mpunet.callbacks import init_callback_objects\n'), ((10432, 10485), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['train', 'dtypes', 'shapes'], {}), '(train, dtypes, shapes)\n', (10462, 10485), True, 'import tensorflow as tf\n'), ((959, 1002), 'numpy.ceil', 'np.ceil', (['(im_per_epoch / sequence.batch_size)'], {}), '(im_per_epoch / sequence.batch_size)\n', (966, 1002), True, 'import numpy as np\n'), ((2101, 2115), 'mpunet.logging.ScreenLogger', 'ScreenLogger', ([], {}), '()\n', (2113, 2115), False, 'from mpunet.logging import ScreenLogger\n'), ((3301, 3332), 'mpunet.train.utils.ensure_sparse', 'ensure_sparse', 
(['(metrics + losses)'], {}), '(metrics + losses)\n', (3314, 3332), False, 'from mpunet.train.utils import ensure_sparse, init_losses, init_metrics, init_optimizer\n'), ((6238, 6283), 'os.path.join', 'os.path.join', (['self.logger.base_path', '"""images"""'], {}), "(self.logger.base_path, 'images')\n", (6250, 6283), False, 'import os\n'), ((6296, 6341), 'mpunet.utils.plotting.save_images', 'save_images', (['train', 'val', 'im_path', 'self.logger'], {}), '(train, val, im_path, self.logger)\n', (6307, 6341), False, 'from mpunet.utils.plotting import save_images\n'), ((8540, 8591), 'mpunet.callbacks.remove_validation_callbacks', 'remove_validation_callbacks', (['callbacks', 'self.logger'], {}), '(callbacks, self.logger)\n', (8567, 8591), False, 'from mpunet.callbacks import SavePredictionImages, Validation, FGBatchBalancer, DividerLine, LearningCurve, MemoryConsumption, MeanReduceLogArrays, remove_validation_callbacks\n'), ((9017, 9131), 'mpunet.callbacks.Validation', 'Validation', (['val'], {'steps': 'val_steps', 'ignore_class_zero': 'val_ignore_class_zero', 'logger': 'self.logger', 'verbose': 'verbose'}), '(val, steps=val_steps, ignore_class_zero=val_ignore_class_zero,\n logger=self.logger, verbose=verbose)\n', (9027, 9131), False, 'from mpunet.callbacks import SavePredictionImages, Validation, FGBatchBalancer, DividerLine, LearningCurve, MemoryConsumption, MeanReduceLogArrays, remove_validation_callbacks\n'), ((9695, 9716), 'mpunet.callbacks.MeanReduceLogArrays', 'MeanReduceLogArrays', ([], {}), '()\n', (9714, 9716), False, 'from mpunet.callbacks import SavePredictionImages, Validation, FGBatchBalancer, DividerLine, LearningCurve, MemoryConsumption, MeanReduceLogArrays, remove_validation_callbacks\n'), ((9812, 9845), 'mpunet.callbacks.LearningCurve', 'LearningCurve', ([], {'logger': 'self.logger'}), '(logger=self.logger)\n', (9825, 9845), False, 'from mpunet.callbacks import SavePredictionImages, Validation, FGBatchBalancer, DividerLine, LearningCurve, 
MemoryConsumption, MeanReduceLogArrays, remove_validation_callbacks\n'), ((9872, 9896), 'mpunet.callbacks.DividerLine', 'DividerLine', (['self.logger'], {}), '(self.logger)\n', (9883, 9896), False, 'from mpunet.callbacks import SavePredictionImages, Validation, FGBatchBalancer, DividerLine, LearningCurve, MemoryConsumption, MeanReduceLogArrays, remove_validation_callbacks\n'), ((9505, 9547), 'mpunet.callbacks.FGBatchBalancer', 'FGBatchBalancer', (['train'], {'logger': 'self.logger'}), '(train, logger=self.logger)\n', (9520, 9547), False, 'from mpunet.callbacks import SavePredictionImages, Validation, FGBatchBalancer, DividerLine, LearningCurve, MemoryConsumption, MeanReduceLogArrays, remove_validation_callbacks\n'), ((9633, 9665), 'mpunet.callbacks.SavePredictionImages', 'SavePredictionImages', (['train', 'val'], {}), '(train, val)\n', (9653, 9665), False, 'from mpunet.callbacks import SavePredictionImages, Validation, FGBatchBalancer, DividerLine, LearningCurve, MemoryConsumption, MeanReduceLogArrays, remove_validation_callbacks\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for wave_fitting.py"""
import numpy
from .wave_fitting import prony, fit_known_frequencies
def test_prony_zeros():
    """Prony on an all-zero signal must return five zero amplitudes/phases."""
    amplitudes, phases = prony(numpy.zeros(10))
    assert (len(amplitudes) == 5)
    assert (len(phases) == 5)
    for amp, phase in zip(amplitudes, phases):
        numpy.testing.assert_allclose(amp, 0)
        numpy.testing.assert_allclose(phase, 0)
def test_prony_signal():
    """Prony recovers amplitudes and phase angles of a 5-component signal.

    Builds a sum of five complex exponentials and checks that the fitted
    |amplitude| and angle(phase) match the construction within 1e-4.
    """
    x_vec = numpy.linspace(0, 1, 11)
    y_vec = (0.5 * numpy.exp(1j * x_vec * 3) + 0.3 * numpy.exp(1j * x_vec * 5) +
             0.15 * numpy.exp(1j * x_vec * 1.5) +
             0.1 * numpy.exp(1j * x_vec * 4) +
             0.05 * numpy.exp(1j * x_vec * 1.2))
    amplitudes, phases = prony(y_vec)
    assert (len(amplitudes) == 5)
    assert (len(phases) == 5)
    # Expected values sorted by decreasing amplitude (matches prony's output
    # order); previously these ten asserts were written out one-by-one with
    # leftover debug prints.
    expected_amplitudes = (0.5, 0.3, 0.15, 0.1, 0.05)
    expected_angles = (0.3, 0.5, 0.15, 0.4, 0.12)
    for j in range(5):
        numpy.testing.assert_allclose(numpy.abs(amplitudes[j]),
                                      expected_amplitudes[j], atol=1e-4)
        numpy.testing.assert_allclose(numpy.angle(phases[j]),
                                      expected_angles[j], atol=1e-4)
def test_fitting_signal():
    """fit_known_frequencies recovers amplitudes given the true frequencies."""
    frequencies = numpy.array([0.4, 0.5, 0.8])
    amplitudes = numpy.array([0.2, 0.4, 0.4])
    times = numpy.linspace(0, 10, 21)
    signal = numpy.array([
        numpy.sum([
            amp * numpy.exp(1j * time * freq)
            for freq, amp in zip(frequencies, amplitudes)
        ])
        for time in times
    ])
    amplitudes_guess = fit_known_frequencies(signal, times, frequencies)
    # BUGFIX: was `assert len(amplitudes_guess == 3)`, which takes the length
    # of a boolean comparison array and is always truthy.
    assert len(amplitudes_guess) == 3
    for index in range(3):
        assert numpy.isclose(amplitudes_guess[index], amplitudes[index])
| [
"numpy.abs",
"numpy.angle",
"numpy.zeros",
"numpy.isclose",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"numpy.testing.assert_allclose"
] | [((703, 718), 'numpy.zeros', 'numpy.zeros', (['(10)'], {}), '(10)\n', (714, 718), False, 'import numpy\n'), ((992, 1016), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (1006, 1016), False, 'import numpy\n'), ((2238, 2266), 'numpy.array', 'numpy.array', (['[0.4, 0.5, 0.8]'], {}), '([0.4, 0.5, 0.8])\n', (2249, 2266), False, 'import numpy\n'), ((2284, 2312), 'numpy.array', 'numpy.array', (['[0.2, 0.4, 0.4]'], {}), '([0.2, 0.4, 0.4])\n', (2295, 2312), False, 'import numpy\n'), ((2325, 2350), 'numpy.linspace', 'numpy.linspace', (['(0)', '(10)', '(21)'], {}), '(0, 10, 21)\n', (2339, 2350), False, 'import numpy\n'), ((853, 900), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['amplitudes[j]', '(0)'], {}), '(amplitudes[j], 0)\n', (882, 900), False, 'import numpy\n'), ((909, 952), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['phases[j]', '(0)'], {}), '(phases[j], 0)\n', (938, 952), False, 'import numpy\n'), ((1471, 1495), 'numpy.abs', 'numpy.abs', (['amplitudes[0]'], {}), '(amplitudes[0])\n', (1480, 1495), False, 'import numpy\n'), ((1547, 1571), 'numpy.abs', 'numpy.abs', (['amplitudes[1]'], {}), '(amplitudes[1])\n', (1556, 1571), False, 'import numpy\n'), ((1623, 1647), 'numpy.abs', 'numpy.abs', (['amplitudes[2]'], {}), '(amplitudes[2])\n', (1632, 1647), False, 'import numpy\n'), ((1700, 1724), 'numpy.abs', 'numpy.abs', (['amplitudes[3]'], {}), '(amplitudes[3])\n', (1709, 1724), False, 'import numpy\n'), ((1776, 1800), 'numpy.abs', 'numpy.abs', (['amplitudes[4]'], {}), '(amplitudes[4])\n', (1785, 1800), False, 'import numpy\n'), ((1853, 1875), 'numpy.angle', 'numpy.angle', (['phases[0]'], {}), '(phases[0])\n', (1864, 1875), False, 'import numpy\n'), ((1927, 1949), 'numpy.angle', 'numpy.angle', (['phases[1]'], {}), '(phases[1])\n', (1938, 1949), False, 'import numpy\n'), ((2001, 2023), 'numpy.angle', 'numpy.angle', (['phases[2]'], {}), '(phases[2])\n', (2012, 2023), False, 'import numpy\n'), 
((2076, 2098), 'numpy.angle', 'numpy.angle', (['phases[3]'], {}), '(phases[3])\n', (2087, 2098), False, 'import numpy\n'), ((2150, 2172), 'numpy.angle', 'numpy.angle', (['phases[4]'], {}), '(phases[4])\n', (2161, 2172), False, 'import numpy\n'), ((2699, 2756), 'numpy.isclose', 'numpy.isclose', (['amplitudes_guess[index]', 'amplitudes[index]'], {}), '(amplitudes_guess[index], amplitudes[index])\n', (2712, 2756), False, 'import numpy\n'), ((1215, 1244), 'numpy.exp', 'numpy.exp', (['(1.0j * x_vec * 1.2)'], {}), '(1.0j * x_vec * 1.2)\n', (1224, 1244), False, 'import numpy\n'), ((1421, 1435), 'numpy.angle', 'numpy.angle', (['p'], {}), '(p)\n', (1432, 1435), False, 'import numpy\n'), ((1167, 1194), 'numpy.exp', 'numpy.exp', (['(1.0j * x_vec * 4)'], {}), '(1.0j * x_vec * 4)\n', (1176, 1194), False, 'import numpy\n'), ((1118, 1147), 'numpy.exp', 'numpy.exp', (['(1.0j * x_vec * 1.5)'], {}), '(1.0j * x_vec * 1.5)\n', (1127, 1147), False, 'import numpy\n'), ((1036, 1063), 'numpy.exp', 'numpy.exp', (['(1.0j * x_vec * 3)'], {}), '(1.0j * x_vec * 3)\n', (1045, 1063), False, 'import numpy\n'), ((1070, 1097), 'numpy.exp', 'numpy.exp', (['(1.0j * x_vec * 5)'], {}), '(1.0j * x_vec * 5)\n', (1079, 1097), False, 'import numpy\n'), ((2416, 2445), 'numpy.exp', 'numpy.exp', (['(1.0j * time * freq)'], {}), '(1.0j * time * freq)\n', (2425, 2445), False, 'import numpy\n')] |
#!/usr/bin/env python3
from mayavi import mlab
mlab.options.offscreen = True
import numpy as np
from glob import glob
import pandas as pd
import os.path
import cv2
import sys
import skvideo.io
from tqdm import tqdm, trange
import sys
from collections import defaultdict
from matplotlib.pyplot import get_cmap
from .common import make_process_fun, get_nframes, get_video_name, get_video_params, get_data_length, natural_keys
def connect(points, bps, bp_dict, color):
    """Draw one 3D polyline through the named bodyparts; return the mlab line."""
    idx = [bp_dict[bp] for bp in bps]
    xs, ys, zs = points[idx, 0], points[idx, 1], points[idx, 2]
    return mlab.plot3d(xs, ys, zs,
                       np.ones(len(idx)), reset_zoom=False,
                       color=color, tube_radius=None, line_width=10)
def connect_all(points, scheme, bp_dict, cmap):
    """Create one line per bodypart chain in the scheme; return all lines."""
    return [connect(points, bps, bp_dict, color=cmap(i)[:3])
            for i, bps in enumerate(scheme)]
def update_line(line, points, bps, bp_dict):
    """Push new (x, y, z) coordinates for the named bodyparts into a line."""
    idx = [bp_dict[bp] for bp in bps]
    coords = np.column_stack((points[idx, 0], points[idx, 1], points[idx, 2]))
    line.mlab_source.points = coords
def update_all_lines(lines, points, scheme, bp_dict):
    """Refresh every drawn line with the current frame's coordinates."""
    for drawn, chain in zip(lines, scheme):
        update_line(drawn, points, chain, bp_dict)
def visualize_labels(config, labels_fname, outname, fps=300):
    """Render a 3D-pose CSV as a mayavi animation and write it to video.

    Args:
        config: project config dict; reads config['labeling']['scheme'] and
            config['triangulation']['optim'].
        labels_fname: path to a pose-3d CSV with <bp>_x/_y/_z/_error/_score
            columns and an 'fnum' frame-number column.
        outname: output video path (encoded h264 via ffmpeg).
        fps: output video framerate.
    """
    try:
        scheme = config['labeling']['scheme']
    except KeyError:
        scheme = []
    data = pd.read_csv(labels_fname)
    cols = [x for x in data.columns if '_error' in x]
    # Bodypart list: taken from the scheme when given, otherwise inferred
    # from the *_error column names
    if len(scheme) == 0:
        bodyparts = [c.replace('_error', '') for c in cols]
    else:
        bodyparts = sorted(set([x for dx in scheme for x in dx]))
    bp_dict = dict(zip(bodyparts, range(len(bodyparts))))
    # Arrays shaped (n_bodyparts, n_frames[, 3])
    all_points = np.array([np.array(data.loc[:, (bp+'_x', bp+'_y', bp+'_z')])
                           for bp in bodyparts], dtype='float64')
    all_errors = np.array([np.array(data.loc[:, bp+'_error'])
                           for bp in bodyparts], dtype='float64')
    all_scores = np.array([np.array(data.loc[:, bp+'_score'])
                           for bp in bodyparts], dtype='float64')
    # With the optimized triangulation, missing errors are trusted (0);
    # otherwise they are treated as bad (10000) and filtered out below
    if config['triangulation']['optim']:
        all_errors[np.isnan(all_errors)] = 0
    else:
        all_errors[np.isnan(all_errors)] = 10000
    # Mask out points with reprojection error >= 100
    good = (all_errors < 100)
    all_points[~good] = np.nan
    all_points_flat = all_points.reshape(-1, 3)
    check = ~np.isnan(all_points_flat[:, 0])
    if np.sum(check) < 10:
        print('too few points to plot, skipping...')
        return
    # Robust scene bounds from the 5th/95th percentile of valid points
    low, high = np.percentile(all_points_flat[check], [5, 95], axis=0)
    nparts = len(bodyparts)
    framedict = dict(zip(data['fnum'], data.index))
    writer = skvideo.io.FFmpegWriter(outname, inputdict={
        # '-hwaccel': 'auto',
        '-framerate': str(fps),
    }, outputdict={
        '-vcodec': 'h264', '-qp': '28', '-pix_fmt': 'yuv420p'
    })
    cmap = get_cmap('tab10')
    # Seed the scene with frame 20; pin two points to the scene bounds so
    # the initial camera fit covers the full range of motion
    points = np.copy(all_points[:, 20])
    points[0] = low
    points[1] = high
    s = np.arange(points.shape[0])
    good = ~np.isnan(points[:, 0])
    fig = mlab.figure(bgcolor=(1,1,1), size=(500,500))
    fig.scene.anti_aliasing_frames = 2
    # Marker size scaled to the spread of x coordinates
    low, high = np.percentile(points[good, 0], [10,90])
    scale_factor = (high - low) / 12.0
    mlab.clf()
    pts = mlab.points3d(points[:, 0], points[:, 1], points[:, 2], s,
                        color=(0.8, 0.8, 0.8),
                        scale_mode='none', scale_factor=scale_factor)
    lines = connect_all(points, scheme, bp_dict, cmap)
    mlab.orientation_axes()
    # Remember the auto-fitted camera so every frame uses the same view
    view = list(mlab.view())
    mlab.view(focalpoint='auto', distance='auto')
    for framenum in trange(data.shape[0], ncols=70):
        # Suspend rendering while mutating the scene for this frame
        fig.scene.disable_render = True
        if framenum in framedict:
            points = all_points[:, framenum]
        else:
            # Frame missing from the CSV: render all-NaN (invisible) points
            points = np.ones((nparts, 3))*np.nan
        s = np.arange(points.shape[0])
        good = ~np.isnan(points[:, 0])
        new = np.vstack([points[:, 0], points[:, 1], points[:, 2]]).T
        pts.mlab_source.points = new
        update_all_lines(lines, points, scheme, bp_dict)
        fig.scene.disable_render = False
        img = mlab.screenshot()
        mlab.view(*view, reset_roll=False)
        writer.writeFrame(img)
    mlab.close(all=True)
    writer.close()
def process_session(config, session_path, filtered=False):
    """Render 3D-labeled videos for every pose-3d CSV in one session.

    Args:
        config: project config dict with 'pipeline' folder names and
            'video_extension'.
        session_path: session directory containing the pipeline subfolders.
        filtered: if True, use the filtered pose-3d inputs/outputs.
    """
    pipeline_videos_raw = config['pipeline']['videos_raw']
    if filtered:
        pipeline_videos_labeled_3d = config['pipeline']['videos_labeled_3d_filter']
        pipeline_3d = config['pipeline']['pose_3d_filter']
    else:
        pipeline_videos_labeled_3d = config['pipeline']['videos_labeled_3d']
        pipeline_3d = config['pipeline']['pose_3d']
    video_ext = config['video_extension']
    vid_fnames = glob(os.path.join(session_path,
                                  pipeline_videos_raw, "*."+video_ext))
    # Group raw videos by their canonical video name (one entry per camera)
    orig_fnames = defaultdict(list)
    for vid in vid_fnames:
        vidname = get_video_name(config, vid)
        orig_fnames[vidname].append(vid)
    labels_fnames = glob(os.path.join(session_path,
                                     pipeline_3d, '*.csv'))
    labels_fnames = sorted(labels_fnames, key=natural_keys)
    outdir = os.path.join(session_path, pipeline_videos_labeled_3d)
    if len(labels_fnames) > 0:
        os.makedirs(outdir, exist_ok=True)
    for fname in labels_fnames:
        basename = os.path.basename(fname)
        basename = os.path.splitext(basename)[0]
        out_fname = os.path.join(outdir, basename+'.mp4')
        # Skip videos already rendered (frame count within 100 of the data)
        if os.path.exists(out_fname) and \
           abs(get_nframes(out_fname) - get_data_length(fname)) < 100:
            continue
        print(out_fname)
        # Use any matching raw video just to read the fps
        some_vid = orig_fnames[basename][0]
        params = get_video_params(some_vid)
        visualize_labels(config, fname, out_fname, params['fps'])
# Module-level entry points that map process_session over all sessions,
# for unfiltered and filtered pose-3d data respectively.
label_videos_3d_all = make_process_fun(process_session, filtered=False)
label_videos_3d_filtered_all = make_process_fun(process_session, filtered=True)
| [
"mayavi.mlab.figure",
"numpy.sum",
"pandas.read_csv",
"numpy.ones",
"numpy.isnan",
"collections.defaultdict",
"numpy.arange",
"numpy.copy",
"mayavi.mlab.points3d",
"matplotlib.pyplot.get_cmap",
"tqdm.trange",
"mayavi.mlab.clf",
"numpy.percentile",
"mayavi.mlab.close",
"mayavi.mlab.view",... | [((1474, 1499), 'pandas.read_csv', 'pd.read_csv', (['labels_fname'], {}), '(labels_fname)\n', (1485, 1499), True, 'import pandas as pd\n'), ((2597, 2651), 'numpy.percentile', 'np.percentile', (['all_points_flat[check]', '[5, 95]'], {'axis': '(0)'}), '(all_points_flat[check], [5, 95], axis=0)\n', (2610, 2651), True, 'import numpy as np\n'), ((2955, 2972), 'matplotlib.pyplot.get_cmap', 'get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (2963, 2972), False, 'from matplotlib.pyplot import get_cmap\n'), ((2988, 3014), 'numpy.copy', 'np.copy', (['all_points[:, 20]'], {}), '(all_points[:, 20])\n', (2995, 3014), True, 'import numpy as np\n'), ((3065, 3091), 'numpy.arange', 'np.arange', (['points.shape[0]'], {}), '(points.shape[0])\n', (3074, 3091), True, 'import numpy as np\n'), ((3138, 3185), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(1, 1, 1)', 'size': '(500, 500)'}), '(bgcolor=(1, 1, 1), size=(500, 500))\n', (3149, 3185), False, 'from mayavi import mlab\n'), ((3239, 3279), 'numpy.percentile', 'np.percentile', (['points[good, 0]', '[10, 90]'], {}), '(points[good, 0], [10, 90])\n', (3252, 3279), True, 'import numpy as np\n'), ((3323, 3333), 'mayavi.mlab.clf', 'mlab.clf', ([], {}), '()\n', (3331, 3333), False, 'from mayavi import mlab\n'), ((3344, 3475), 'mayavi.mlab.points3d', 'mlab.points3d', (['points[:, 0]', 'points[:, 1]', 'points[:, 2]', 's'], {'color': '(0.8, 0.8, 0.8)', 'scale_mode': '"""none"""', 'scale_factor': 'scale_factor'}), "(points[:, 0], points[:, 1], points[:, 2], s, color=(0.8, 0.8,\n 0.8), scale_mode='none', scale_factor=scale_factor)\n", (3357, 3475), False, 'from mayavi import mlab\n'), ((3579, 3602), 'mayavi.mlab.orientation_axes', 'mlab.orientation_axes', ([], {}), '()\n', (3600, 3602), False, 'from mayavi import mlab\n'), ((3638, 3683), 'mayavi.mlab.view', 'mlab.view', ([], {'focalpoint': '"""auto"""', 'distance': '"""auto"""'}), "(focalpoint='auto', distance='auto')\n", (3647, 3683), False, 'from mayavi import 
mlab\n'), ((3705, 3736), 'tqdm.trange', 'trange', (['data.shape[0]'], {'ncols': '(70)'}), '(data.shape[0], ncols=70)\n', (3711, 3736), False, 'from tqdm import tqdm, trange\n'), ((4321, 4341), 'mayavi.mlab.close', 'mlab.close', ([], {'all': '(True)'}), '(all=True)\n', (4331, 4341), False, 'from mayavi import mlab\n'), ((4966, 4983), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4977, 4983), False, 'from collections import defaultdict\n'), ((1060, 1119), 'numpy.vstack', 'np.vstack', (['[points[ixs, 0], points[ixs, 1], points[ixs, 2]]'], {}), '([points[ixs, 0], points[ixs, 1], points[ixs, 2]])\n', (1069, 1119), True, 'import numpy as np\n'), ((2448, 2479), 'numpy.isnan', 'np.isnan', (['all_points_flat[:, 0]'], {}), '(all_points_flat[:, 0])\n', (2456, 2479), True, 'import numpy as np\n'), ((2488, 2501), 'numpy.sum', 'np.sum', (['check'], {}), '(check)\n', (2494, 2501), True, 'import numpy as np\n'), ((3104, 3126), 'numpy.isnan', 'np.isnan', (['points[:, 0]'], {}), '(points[:, 0])\n', (3112, 3126), True, 'import numpy as np\n'), ((3620, 3631), 'mayavi.mlab.view', 'mlab.view', ([], {}), '()\n', (3629, 3631), False, 'from mayavi import mlab\n'), ((3934, 3960), 'numpy.arange', 'np.arange', (['points.shape[0]'], {}), '(points.shape[0])\n', (3943, 3960), True, 'import numpy as np\n'), ((4222, 4239), 'mayavi.mlab.screenshot', 'mlab.screenshot', ([], {}), '()\n', (4237, 4239), False, 'from mayavi import mlab\n'), ((4249, 4283), 'mayavi.mlab.view', 'mlab.view', (['*view'], {'reset_roll': '(False)'}), '(*view, reset_roll=False)\n', (4258, 4283), False, 'from mayavi import mlab\n'), ((1803, 1859), 'numpy.array', 'np.array', (["data.loc[:, (bp + '_x', bp + '_y', bp + '_z')]"], {}), "(data.loc[:, (bp + '_x', bp + '_y', bp + '_z')])\n", (1811, 1859), True, 'import numpy as np\n'), ((1948, 1984), 'numpy.array', 'np.array', (["data.loc[:, bp + '_error']"], {}), "(data.loc[:, bp + '_error'])\n", (1956, 1984), True, 'import numpy as np\n'), ((2077, 2113), 
'numpy.array', 'np.array', (["data.loc[:, bp + '_score']"], {}), "(data.loc[:, bp + '_score'])\n", (2085, 2113), True, 'import numpy as np\n'), ((2240, 2260), 'numpy.isnan', 'np.isnan', (['all_errors'], {}), '(all_errors)\n', (2248, 2260), True, 'import numpy as np\n'), ((2295, 2315), 'numpy.isnan', 'np.isnan', (['all_errors'], {}), '(all_errors)\n', (2303, 2315), True, 'import numpy as np\n'), ((3977, 3999), 'numpy.isnan', 'np.isnan', (['points[:, 0]'], {}), '(points[:, 0])\n', (3985, 3999), True, 'import numpy as np\n'), ((4015, 4068), 'numpy.vstack', 'np.vstack', (['[points[:, 0], points[:, 1], points[:, 2]]'], {}), '([points[:, 0], points[:, 1], points[:, 2]])\n', (4024, 4068), True, 'import numpy as np\n'), ((3893, 3913), 'numpy.ones', 'np.ones', (['(nparts, 3)'], {}), '((nparts, 3))\n', (3900, 3913), True, 'import numpy as np\n')] |
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import json
import pandas as pd
import sys
from os.path import exists
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
#__________________________________________________________
def plot(val, date, ind, i):
    """Histogram of an indicator's reliability scores for one month.

    Saves both a PNG and a PDF under
    plots/question2/indicators_reliability_score_months/.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.xlabel('indicator reliability score')
    plt.hist(val, 6, range=[0, 6], facecolor='g', alpha=0.4)
    plt.text(0.02, 1.02, f'indicator={ind}', transform=ax.transAxes)
    plt.text(0.02, 0.95, rf'$\mu={np.mean(val):.3f},\ \sigma={np.std(val):.3f}$',
             transform=ax.transAxes)
    plt.text(0.02, 0.9, f'number of crises={len(val)}', transform=ax.transAxes)
    plt.text(0.02, 0.85, f'date={date}', transform=ax.transAxes)
    base = f'plots/question2/indicators_reliability_score_months/indicator{i}_{date}'
    plt.savefig(base + '.png')
    plt.savefig(base + '.pdf')
    plt.close()
#__________________________________________________________
def plotcrisis(val, date, ind, i, c):
    """Reliability-score time series of one indicator over a single crisis.

    Saves both a PNG and a PDF under
    plots/question2/indicators_reliability_score_months_crisis/.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_ylim([0., 5.5])
    plt.ylabel('reliability score')
    plt.text(0.02, 1.02, f'indicator={ind}', transform=ax.transAxes)
    plt.text(0.02, 0.95, rf'$\mu={np.mean(val):.3f},\ \sigma={np.std(val):.3f}$',
             transform=ax.transAxes)
    plt.text(0.02, 0.9, f'crisi id={c}', transform=ax.transAxes)
    plt.plot(date, val, marker='o', ms=5, color='b')
    plt.xticks(fontsize=8, rotation=45)
    plt.grid(True, alpha=0.2, color='g')
    base = f'plots/question2/indicators_reliability_score_months_crisis/indicator{i}_{c}'
    plt.savefig(base + '.png')
    plt.savefig(base + '.pdf')
    plt.close()
#__________________________________________________________
def ploterr(mean, std, num, date, ind, i):
    """Plot mean +/- std reliability score per month (left axis) together
    with the number of crises per month (right axis), saving png + pdf."""
    fig, score_ax = plt.subplots()
    count_ax = score_ax.twinx()
    score_ax.errorbar(date, mean, yerr=std, marker='o', ms=5, color='b')
    count_ax.plot(date, num, 'r-')
    # Re-apply the tick positions so the rotated labels line up.
    score_ax.set_xticks(score_ax.get_xticks())
    score_ax.set_xticklabels(date, rotation=45, fontsize=8)
    score_ax.set_ylabel('indicator reliability score (mean value)', color='b')
    score_ax.set_ylim([0., 4.])
    count_ax.set_ylim([0., max(num) * 1.1])
    count_ax.set_ylabel('number of crises', color='r')
    score_ax.text(0.02, 1.02, 'indicator={}'.format(ind), transform=score_ax.transAxes)
    score_ax.grid(True, alpha=0.2, color='g')
    plt.savefig('plots/question2/indicators_reliability_score_vs_time/indicator{}_vs_time.png'.format(i))
    plt.savefig('plots/question2/indicators_reliability_score_vs_time/indicator{}_vs_time.pdf'.format(i))
    plt.close()
#__________________________________________________________
def run(fname):
    """Load the ISI JSON export *fname*, keep reliability-score entries after
    2020-04-01, then emit per-indicator histograms, time trends and
    per-crisis score series via plot()/ploterr()/plotcrisis()."""
    # loading data into json dict and df
    f = open(fname)
    data = json.load(f)
    df = pd.DataFrame()
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — pd.concat
    # would be the forward-compatible call here.
    df = df.append(pd.DataFrame(data["results"]))
    #build sub_df with less info
    sub_df = df[['crisis_id','source_and_date','date_of_entry','reliability', 'reliability_score','indicator']]
    #filter date
    sub_df = sub_df[sub_df['date_of_entry'] > '2020-04-01']
    #filter null values
    sub_df=sub_df[sub_df.reliability_score.notnull()]
    sub_df=sub_df[sub_df.indicator.notnull()]
    #build range of dates (not very elegant, but does the job)
    # Walk backwards month by month from starting_date, recording each
    # month's first day and a "-31" sentinel end (used only as an upper
    # string bound, so months shorter than 31 days still compare correctly).
    nmonths=18
    starting_date='2021-10-01'
    ending_date=starting_date.split('-')[0]+'-'+starting_date.split('-')[1]+'-31'
    starting_date_list=[]
    ending_date_list=[]
    for i in range(nmonths):
        starting_date_list.append(starting_date)
        ending_date_list.append(ending_date)
        if int(starting_date.split('-')[1])>1:
            month=int(starting_date.split('-')[1])-1
            if month<10:month='0'+str(month)
            starting_date = starting_date.split('-')[0]+'-'+str(month)+'-01'
        else:
            # January: step back to December of the previous year.
            year=int(starting_date.split('-')[0])-1
            starting_date = str(year)+'-12-01'
        ending_date=starting_date.split('-')[0]+'-'+starting_date.split('-')[1]+'-31'
    #chronological order
    starting_date_list=list(reversed(starting_date_list))
    ending_date_list=list(reversed(ending_date_list))
    #loop over indicators, slice in months and plot mean/rms/num for all crises
    indicator_list = sub_df.indicator.unique().tolist()
    crisis_list = sub_df.crisis_id.unique().tolist()
    counter=0
    for i in indicator_list:
        mean=[]
        std=[]
        date=[]
        num=[]
        for m in range(nmonths):
            #sub df per month
            sub_df_month = sub_df[(sub_df['date_of_entry'] > starting_date_list[m]) & (sub_df['date_of_entry'] < ending_date_list[m])]
            #get indicator i
            sub_df_month_id = sub_df_month[sub_df_month['indicator'] == i]
            #filter one crisis
            # keep=False drops every crisis that appears more than once
            # in the month, so each remaining score is one crisis.
            sub_df_month_id = sub_df_month_id.drop_duplicates(subset=['crisis_id'], keep=False)
            #list the score of the indicator i
            list_score = sub_df_month_id.reliability_score.tolist()
            if len(list_score)>0:
                mean.append(np.mean(list_score))
                std.append(np.std(list_score))
                # Month label: the sentinel "-31" is stripped off again.
                date.append(ending_date_list[m].replace('-31',''))
                num.append(len(list_score))
                plot(list_score,date[-1],i,counter)
        ploterr(mean,std,num,date, i, counter)
        counter+=1
    #loop over indicators AND crises, slice in months and plot mean/rms/num for all crises
    counter=0
    for i in indicator_list:
        #sub df per indicator
        sub_df_id = sub_df[sub_df['indicator'] == i]
        for c in crisis_list:
            #sub df per crisis
            sub_df_id_crisis = sub_df_id[sub_df_id['crisis_id'] == c]
            value=[]
            date=[]
            for m in range(nmonths):
                #sub df per month
                sub_df_id_crisis_month = sub_df_id_crisis[(sub_df_id_crisis['date_of_entry'] > starting_date_list[m]) & (sub_df_id_crisis['date_of_entry'] < ending_date_list[m])]
                list_score_crisis = sub_df_id_crisis_month.reliability_score.tolist()
                if len(list_score_crisis)==1:
                    value.append(list_score_crisis[0])
                    date.append(ending_date_list[m].replace('-31',''))
            if len(value)>0:
                # Only plot crises whose score swung by roughly 2 points.
                diff=max(value)-min(value)
                if diff>1.9:
                    plotcrisis(value,date,i,counter,c)
            counter+=1
#__________________________________________________________
# Command-line entry point: expects exactly one argument, the JSON log file.
if __name__ == "__main__":
    #check arguments
    if len(sys.argv)!=2:
        print ("usage:")
        print ("python ",sys.argv[0]," file.json")
        print ("For example: python ",sys.argv[0]," data/isi_log.json")
        sys.exit(3)
    #check file exists
    if not exists(sys.argv[1]):
        print ('file does not exists')
        sys.exit(3)
    #run analysis
    run(sys.argv[1])
| [
"pandas.DataFrame",
"json.load",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.plot",
"numpy.std",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xl... | [((291, 303), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (301, 303), True, 'import matplotlib.pyplot as plt\n'), ((336, 377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""indicator reliability score"""'], {}), "('indicator reliability score')\n", (346, 377), True, 'import matplotlib.pyplot as plt\n'), ((382, 438), 'matplotlib.pyplot.hist', 'plt.hist', (['val', '(6)'], {'range': '[0, 6]', 'facecolor': '"""g"""', 'alpha': '(0.4)'}), "(val, 6, range=[0, 6], facecolor='g', alpha=0.4)\n", (390, 438), True, 'import matplotlib.pyplot as plt\n'), ((1006, 1017), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1015, 1017), True, 'import matplotlib.pyplot as plt\n'), ((1123, 1135), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1133, 1135), True, 'import matplotlib.pyplot as plt\n'), ((1195, 1226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""reliability score"""'], {}), "('reliability score')\n", (1205, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1503, 1551), 'matplotlib.pyplot.plot', 'plt.plot', (['date', 'val'], {'marker': '"""o"""', 'ms': '(5)', 'color': '"""b"""'}), "(date, val, marker='o', ms=5, color='b')\n", (1511, 1551), True, 'import matplotlib.pyplot as plt\n'), ((1556, 1591), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(8)', 'rotation': '(45)'}), '(fontsize=8, rotation=45)\n', (1566, 1591), True, 'import matplotlib.pyplot as plt\n'), ((1595, 1631), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'alpha': '(0.2)', 'color': '"""g"""'}), "(True, alpha=0.2, color='g')\n", (1603, 1631), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1865), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1863, 1865), True, 'import matplotlib.pyplot as plt\n'), ((1983, 1997), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1995, 1997), True, 'import matplotlib.pyplot as plt\n'), ((2747, 2758), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', 
(2756, 2758), True, 'import matplotlib.pyplot as plt\n'), ((2908, 2920), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2917, 2920), False, 'import json\n'), ((2930, 2944), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2942, 2944), True, 'import pandas as pd\n'), ((2964, 2993), 'pandas.DataFrame', 'pd.DataFrame', (["data['results']"], {}), "(data['results'])\n", (2976, 2993), True, 'import pandas as pd\n'), ((6837, 6848), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (6845, 6848), False, 'import sys\n'), ((6892, 6911), 'os.path.exists', 'exists', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (6898, 6911), False, 'from os.path import exists\n'), ((6960, 6971), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (6968, 6971), False, 'import sys\n'), ((579, 591), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (586, 591), True, 'import numpy as np\n'), ((592, 603), 'numpy.std', 'np.std', (['val'], {}), '(val)\n', (598, 603), True, 'import numpy as np\n'), ((1369, 1381), 'numpy.mean', 'np.mean', (['val'], {}), '(val)\n', (1376, 1381), True, 'import numpy as np\n'), ((1382, 1393), 'numpy.std', 'np.std', (['val'], {}), '(val)\n', (1388, 1393), True, 'import numpy as np\n'), ((5175, 5194), 'numpy.mean', 'np.mean', (['list_score'], {}), '(list_score)\n', (5182, 5194), True, 'import numpy as np\n'), ((5223, 5241), 'numpy.std', 'np.std', (['list_score'], {}), '(list_score)\n', (5229, 5241), True, 'import numpy as np\n')] |
"""Preprocess the us census bridged race statistics dataset."""
import io
import numpy as np
import pandas as pd
def _fixed_width_to_csv(file, blocks):
slices = [slice(*b) for b in blocks]
with open(file, "r") as f:
data = f.readlines()
fixed_lines = [",".join([line[s].strip() for s in slices]) for line in data]
return io.StringIO("\n".join(fixed_lines))
def bin_census_data(filename, out_filename):
    """Group ages in the Census csv to match the bins used by Prem et al.

    Parameters
    ----------
    filename : bucky._typing.PathLike
        Unmodified Census CSV
    out_filename : bucky._typing.PathLike
        Output file for binned data
    """
    # Preprocess the file and add delimiters because idk who invented this fixed width format...
    # see pg 17 of https://www.cdc.gov/nchs/data/nvss/bridged_race/Documentation-Bridged-PostcenV2020.pdf
    # if you want your brain to melt
    # Column spans for the three fields read below (named adm2/age/N);
    # exact semantics come from the CDC layout document linked above.
    blocks = [(4, 9), (9, 11), (101, 109)]
    csv_f_obj = _fixed_width_to_csv(filename, blocks)
    df = pd.read_csv(csv_f_obj, names=["adm2", "age", "N"], dtype=int)
    # Bin single-year ages into left-closed 5-year groups 0-4, ..., 70-74
    # plus a final wide 75-120 group.
    df["age_group"] = pd.cut(df["age"], np.append(np.arange(0, 76, 5), 120), right=False)
    # Sum population per (adm2, age_group) and pivot age groups to columns.
    df = df.groupby(["adm2", "age_group"]).sum()[["N"]].squeeze().unstack("age_group")
    # add in territory data (included with bucky)
    territory_df = pd.read_csv("../included_data/population/territory_pop.csv", index_col="adm2")
    territory_df.columns = df.columns
    df = pd.concat([df, territory_df])
    df.to_csv(out_filename)
| [
"pandas.read_csv",
"numpy.arange",
"pandas.concat"
] | [((1038, 1099), 'pandas.read_csv', 'pd.read_csv', (['csv_f_obj'], {'names': "['adm2', 'age', 'N']", 'dtype': 'int'}), "(csv_f_obj, names=['adm2', 'age', 'N'], dtype=int)\n", (1049, 1099), True, 'import pandas as pd\n'), ((1348, 1426), 'pandas.read_csv', 'pd.read_csv', (['"""../included_data/population/territory_pop.csv"""'], {'index_col': '"""adm2"""'}), "('../included_data/population/territory_pop.csv', index_col='adm2')\n", (1359, 1426), True, 'import pandas as pd\n'), ((1475, 1504), 'pandas.concat', 'pd.concat', (['[df, territory_df]'], {}), '([df, territory_df])\n', (1484, 1504), True, 'import pandas as pd\n'), ((1150, 1169), 'numpy.arange', 'np.arange', (['(0)', '(76)', '(5)'], {}), '(0, 76, 5)\n', (1159, 1169), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
class Q1(nn.Module):
    """3x3 convolution whose kernel is masked to [[1,1,0],[1,0,0],[0,0,0]],
    i.e. only the top-left L-shaped taps contribute."""

    def __init__(self, in_ch, out_ch):
        super(Q1, self).__init__()
        # Build the mask on CPU; forward() moves it to the conv's device, so
        # the original eager .cuda() (which crashed on CPU-only hosts) is
        # unnecessary — on GPU setups model.cuda()/.to(device) plus the
        # forward-time .to() reproduce the old behavior exactly.
        self.mask = torch.from_numpy(np.array([[1, 1, 0], [1, 0, 0], [0, 0, 0]], dtype=np.float32))
        self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, padding=1, kernel_size=3)

    def forward(self, x):
        # Keep the mask on the same device as the weights (CPU or GPU).
        self.mask = self.mask.to(self.conv1.weight.device)
        # Zero the disallowed kernel taps before convolving.
        self.conv1.weight.data = self.conv1.weight * self.mask
        x = self.conv1(x)
        return x
class Q2(nn.Module):
    """Dilated 3x3 convolution masked to [[1,1,1],[1,1,0],[1,0,0]]; padding
    equals the dilation so the spatial size is preserved."""

    def __init__(self, in_ch, out_ch, dilated_value):
        super(Q2, self).__init__()
        # Mask stays on CPU at build time; forward() relocates it to the
        # conv's device. The original eager .cuda() crashed on CPU-only
        # hosts and is redundant once the model itself is moved to GPU.
        self.mask = torch.from_numpy(np.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]], dtype=np.float32))
        self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=dilated_value,
                               dilation=dilated_value)

    def forward(self, x):
        self.mask = self.mask.to(self.conv1.weight.device)
        # Zero the disallowed kernel taps before convolving.
        self.conv1.weight.data = self.conv1.weight * self.mask
        x = self.conv1(x)
        return x
class E1(nn.Module):
    """3x3 convolution masked to [[0,1,1],[0,0,1],[0,0,0]] — the mirror of
    Q1's mask (top-right taps only)."""

    def __init__(self, in_ch, out_ch):
        super(E1, self).__init__()
        # Mask built on CPU; forward() moves it to the conv's device, so the
        # original eager .cuda() (a crash on CPU-only hosts) is not needed.
        self.mask = torch.from_numpy(np.array([[0, 1, 1], [0, 0, 1], [0, 0, 0]], dtype=np.float32))
        self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, padding=1, kernel_size=3)

    def forward(self, x):
        self.mask = self.mask.to(self.conv1.weight.device)
        # Zero the disallowed kernel taps before convolving.
        self.conv1.weight.data = self.conv1.weight * self.mask
        x = self.conv1(x)
        return x
class E2(nn.Module):
    """Dilated 3x3 convolution masked to [[1,1,1],[0,1,1],[0,0,1]]; padding
    equals the dilation so the spatial size is preserved."""

    def __init__(self, in_ch, out_ch, dilated_value):
        super(E2, self).__init__()
        # Mask built on CPU; forward() moves it to the conv's device, so the
        # original eager .cuda() (a crash on CPU-only hosts) is not needed.
        self.mask = torch.from_numpy(np.array([[1, 1, 1], [0, 1, 1], [0, 0, 1]], dtype=np.float32))
        self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=dilated_value,
                               dilation=dilated_value)

    def forward(self, x):
        self.mask = self.mask.to(self.conv1.weight.device)
        # Zero the disallowed kernel taps before convolving.
        self.conv1.weight.data = self.conv1.weight * self.mask
        x = self.conv1(x)
        return x
class D1(nn.Module):
    """3x3 convolution masked to [[0,0,0],[0,0,0],[1,1,1]] — only the bottom
    row of kernel taps contributes."""

    def __init__(self, in_ch, out_ch):
        super(D1, self).__init__()
        # Mask built on CPU; forward() moves it to the conv's device, so the
        # original eager .cuda() (a crash on CPU-only hosts) is not needed.
        self.mask = torch.from_numpy(np.array([[0, 0, 0], [0, 0, 0], [1, 1, 1]], dtype=np.float32))
        self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, padding=1, kernel_size=3)

    def forward(self, x):
        self.mask = self.mask.to(self.conv1.weight.device)
        # Zero the disallowed kernel taps before convolving.
        self.conv1.weight.data = self.conv1.weight * self.mask
        x = self.conv1(x)
        return x
class D2(nn.Module):
    """Dilated 3x3 convolution masked to [[0,0,0],[1,1,1],[1,1,1]]; padding
    equals the dilation so the spatial size is preserved."""

    def __init__(self, in_ch, out_ch, dilated_value):
        super(D2, self).__init__()
        # Mask built on CPU; forward() moves it to the conv's device, so the
        # original eager .cuda() (a crash on CPU-only hosts) is not needed.
        self.mask = torch.from_numpy(np.array([[0, 0, 0], [1, 1, 1], [1, 1, 1]], dtype=np.float32))
        self.conv1 = nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=dilated_value,
                               dilation=dilated_value)

    def forward(self, x):
        self.mask = self.mask.to(self.conv1.weight.device)
        # Zero the disallowed kernel taps before convolving.
        self.conv1.weight.data = self.conv1.weight * self.mask
        x = self.conv1(x)
        return x
class QED_first_layer(nn.Module):
    """First QED stage: run the Q1/E1/D1 masked convolutions on the same
    input and return their three feature maps as a list."""

    def __init__(self, in_ch, out_ch):
        super(QED_first_layer, self).__init__()
        self.q1 = Q1(in_ch, out_ch)
        self.e1 = E1(in_ch, out_ch)
        self.d1 = D1(in_ch, out_ch)

    def forward(self, x):
        # One output per branch, in fixed (Q, E, D) order.
        return [branch(x) for branch in (self.q1, self.e1, self.d1)]
class QED_layer(nn.Module):
    """Middle QED stage: apply a per-branch PReLU followed by the dilated
    masked convolution (Q2/E2/D2) to each of the three branch inputs."""

    def __init__(self, in_ch, out_ch, dilated_value):
        super(QED_layer, self).__init__()
        # No eager .cuda() here: it crashed on CPU-only hosts, and moving
        # the whole model with .cuda()/.to(device) relocates these
        # submodules anyway, so GPU behavior is unchanged.
        self.q2_prelu = nn.PReLU(in_ch, 0)
        self.e2_prelu = nn.PReLU(in_ch, 0)
        self.d2_prelu = nn.PReLU(in_ch, 0)
        self.q2 = Q2(in_ch, out_ch, dilated_value)
        self.e2 = E2(in_ch, out_ch, dilated_value)
        self.d2 = D2(in_ch, out_ch, dilated_value)

    def forward(self, inputs):
        # inputs: [q_branch, e_branch, d_branch] feature maps.
        outputs = []
        outputs.append(self.q2(self.q2_prelu(inputs[0])))
        outputs.append(self.e2(self.e2_prelu(inputs[1])))
        outputs.append(self.d2(self.d2_prelu(inputs[2])))
        return outputs
class Average_layer(nn.Module):
    """Merge the three QED branches: element-wise mean across the branch
    tensors followed by a PReLU."""

    def __init__(self, in_ch):
        super(Average_layer, self).__init__()
        # No eager .cuda(): a model-level .cuda()/.to(device) moves this
        # submodule too, and the hard-coded call crashed on CPU-only hosts.
        self.prelu = nn.PReLU(in_ch, 0)

    def forward(self, inputs):
        # Stack the branch tensors and average over the new leading axis.
        mean = torch.mean(torch.stack(inputs), dim=0)
        output = self.prelu(mean)
        return output
class Residual_module(nn.Module):
    """Residual block: two 1x1 convolutions with a PReLU between them; the
    branch output is averaged with the input and passed through a final
    PReLU."""

    def __init__(self, in_ch):
        super(Residual_module, self).__init__()
        # No eager .cuda(): model-level .cuda()/.to(device) moves these
        # submodules, and the hard-coded call crashed on CPU-only hosts.
        self.prelu1 = nn.PReLU(in_ch, 0)
        self.prelu2 = nn.PReLU(in_ch, 0)
        self.conv1_1by1 = nn.Conv2d(in_channels=in_ch, out_channels=in_ch, kernel_size=1)
        self.conv2_1by1 = nn.Conv2d(in_channels=in_ch, out_channels=in_ch, kernel_size=1)

    def forward(self, input):
        output_residual = self.conv1_1by1(input)
        output_residual = self.prelu1(output_residual)
        output_residual = self.conv2_1by1(output_residual)
        # Residual merge: mean of the input and the branch output.
        output = torch.mean(torch.stack([input, output_residual]), dim=0)
        output = self.prelu2(output)
        return output
# class QED_first_layer(nn.Module):
# def __init__(self, in_ch, out_ch):
# super(QED_first_layer, self).__init__()
# self.q1 = Q1(in_ch,out_ch)
# self.e1 = E1(in_ch,out_ch)
# self.d1 = D1(in_ch,out_ch)
# def forward(self, x):
# outputs = []
# outputs = torch.cat((self.q1(x), self.e1(x), self.d1(x)), )
# return outputs
# class QED_layer(nn.Module):
# def __init__(self, in_ch, out_ch, dilated_value):
# super(QED_layer, self).__init__()
# self.q2_prelu = nn.PReLU(in_ch,0).cuda()
# self.e2_prelu = nn.PReLU(in_ch,0).cuda()
# self.d2_prelu = nn.PReLU(in_ch,0).cuda()
# self.q2 = Q2(in_ch,out_ch,dilated_value)
# self.e2 = E2(in_ch,out_ch,dilated_value)
# self.d2 = D2(in_ch,out_ch,dilated_value)
# def forward(self, inputs):
# out_q2 = self.q2_prelu(self.q2(inputs[:1]))
# out_e2 = self.e2_prelu(self.e2(inputs[1:2]))
# out_d2 = self.d2_prelu(self.d2(inputs[2:3]))
# outputs = torch.cat((out_q2, out_e2, out_d2), )
# return outputs
# class Average_layer(nn.Module):
# def __init__(self, in_ch):
# super(Average_layer, self).__init__()
# self.prelu = nn.PReLU(in_ch,0).cuda()
# def forward(self, inputs):
# mean = torch.mean(torch.stack(inputs), dim=0)
# # mean = torch.mean(inputs, dim=0, keepdim = True)
# output = self.prelu(mean)
# return output | [
"torch.nn.PReLU",
"torch.nn.Conv2d",
"numpy.array",
"torch.stack"
] | [((280, 355), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_ch', 'out_channels': 'out_ch', 'padding': '(1)', 'kernel_size': '(3)'}), '(in_channels=in_ch, out_channels=out_ch, padding=1, kernel_size=3)\n', (289, 355), True, 'import torch.nn as nn\n'), ((787, 903), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_ch', 'out_channels': 'out_ch', 'kernel_size': '(3)', 'padding': 'dilated_value', 'dilation': 'dilated_value'}), '(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=\n dilated_value, dilation=dilated_value)\n', (796, 903), True, 'import torch.nn as nn\n'), ((1346, 1421), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_ch', 'out_channels': 'out_ch', 'padding': '(1)', 'kernel_size': '(3)'}), '(in_channels=in_ch, out_channels=out_ch, padding=1, kernel_size=3)\n', (1355, 1421), True, 'import torch.nn as nn\n'), ((1853, 1969), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_ch', 'out_channels': 'out_ch', 'kernel_size': '(3)', 'padding': 'dilated_value', 'dilation': 'dilated_value'}), '(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=\n dilated_value, dilation=dilated_value)\n', (1862, 1969), True, 'import torch.nn as nn\n'), ((2412, 2487), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_ch', 'out_channels': 'out_ch', 'padding': '(1)', 'kernel_size': '(3)'}), '(in_channels=in_ch, out_channels=out_ch, padding=1, kernel_size=3)\n', (2421, 2487), True, 'import torch.nn as nn\n'), ((2919, 3035), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_ch', 'out_channels': 'out_ch', 'kernel_size': '(3)', 'padding': 'dilated_value', 'dilation': 'dilated_value'}), '(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=\n dilated_value, dilation=dilated_value)\n', (2928, 3035), True, 'import torch.nn as nn\n'), ((5081, 5144), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_ch', 'out_channels': 'in_ch', 'kernel_size': '(1)'}), '(in_channels=in_ch, out_channels=in_ch, 
kernel_size=1)\n', (5090, 5144), True, 'import torch.nn as nn\n'), ((5171, 5234), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_ch', 'out_channels': 'in_ch', 'kernel_size': '(1)'}), '(in_channels=in_ch, out_channels=in_ch, kernel_size=1)\n', (5180, 5234), True, 'import torch.nn as nn\n'), ((4635, 4654), 'torch.stack', 'torch.stack', (['inputs'], {}), '(inputs)\n', (4646, 4654), False, 'import torch\n'), ((5505, 5542), 'torch.stack', 'torch.stack', (['[input, output_residual]'], {}), '([input, output_residual])\n', (5516, 5542), False, 'import torch\n'), ((3813, 3831), 'torch.nn.PReLU', 'nn.PReLU', (['in_ch', '(0)'], {}), '(in_ch, 0)\n', (3821, 3831), True, 'import torch.nn as nn\n'), ((3863, 3881), 'torch.nn.PReLU', 'nn.PReLU', (['in_ch', '(0)'], {}), '(in_ch, 0)\n', (3871, 3881), True, 'import torch.nn as nn\n'), ((3913, 3931), 'torch.nn.PReLU', 'nn.PReLU', (['in_ch', '(0)'], {}), '(in_ch, 0)\n', (3921, 3931), True, 'import torch.nn as nn\n'), ((4551, 4569), 'torch.nn.PReLU', 'nn.PReLU', (['in_ch', '(0)'], {}), '(in_ch, 0)\n', (4559, 4569), True, 'import torch.nn as nn\n'), ((4925, 4943), 'torch.nn.PReLU', 'nn.PReLU', (['in_ch', '(0)'], {}), '(in_ch, 0)\n', (4933, 4943), True, 'import torch.nn as nn\n'), ((4973, 4991), 'torch.nn.PReLU', 'nn.PReLU', (['in_ch', '(0)'], {}), '(in_ch, 0)\n', (4981, 4991), True, 'import torch.nn as nn\n'), ((189, 250), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 0, 0], [0, 0, 0]]'], {'dtype': 'np.float32'}), '([[1, 1, 0], [1, 0, 0], [0, 0, 0]], dtype=np.float32)\n', (197, 250), True, 'import numpy as np\n'), ((696, 757), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 0], [1, 0, 0]]'], {'dtype': 'np.float32'}), '([[1, 1, 1], [1, 1, 0], [1, 0, 0]], dtype=np.float32)\n', (704, 757), True, 'import numpy as np\n'), ((1255, 1316), 'numpy.array', 'np.array', (['[[0, 1, 1], [0, 0, 1], [0, 0, 0]]'], {'dtype': 'np.float32'}), '([[0, 1, 1], [0, 0, 1], [0, 0, 0]], dtype=np.float32)\n', (1263, 1316), True, 'import numpy as np\n'), 
((1762, 1823), 'numpy.array', 'np.array', (['[[1, 1, 1], [0, 1, 1], [0, 0, 1]]'], {'dtype': 'np.float32'}), '([[1, 1, 1], [0, 1, 1], [0, 0, 1]], dtype=np.float32)\n', (1770, 1823), True, 'import numpy as np\n'), ((2321, 2382), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0], [1, 1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 0], [0, 0, 0], [1, 1, 1]], dtype=np.float32)\n', (2329, 2382), True, 'import numpy as np\n'), ((2828, 2889), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 1, 1], [1, 1, 1]]'], {'dtype': 'np.float32'}), '([[0, 0, 0], [1, 1, 1], [1, 1, 1]], dtype=np.float32)\n', (2836, 2889), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""odeint.py: Demonstrate solving an ordinary differential equation by using
odeint.
References:
* Solving Ordinary Differential Equations (ODEs) using Python
"""
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# pylint: disable=invalid-name
# Solve y''(t) + a y'(t) + b y(t) == 0.
# pylint: disable=unused-argument
def deriv(y, t):
    """Return dy/dt for y'' + a*y' + b*y == 0 in first-order form.

    State vector: y[0] is y(t), y[1] is y'(t); *t* is unused but required
    by odeint's callback signature.

    Fix: from the reduction y'' = -(a*y' + b*y), the second component must
    be -(a*y[1] + b*y[0]). The original computed -(a*y[0] + b*y[1]), i.e.
    it solved y'' + b*y' + a*y == 0 instead of the stated equation.
    """
    a = 3.0
    b = 2.0
    return np.array([
        y[1],                    # (y)'  = y'
        -(a * y[1] + b * y[0])   # (y')' = -(a*y' + b*y)
    ])
# Integrate the ODE over t in [0, 10] at 1000 evenly spaced sample points.
time = np.linspace(0.0, 10.0, 1000)
yinit = np.array([0.0005, 0.2])  # initial values
# odeint returns one row per time point; column 0 holds y(t).
y = odeint(deriv, yinit, time)
plt.figure()
# y[:,0] is the first column of y
plt.plot(time, y[:, 0], color='deeppink')
plt.xlabel("t")
plt.ylabel("y")
# Show the solution curve interactively (blocks until the window closes).
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"scipy.integrate.odeint",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((607, 635), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(1000)'], {}), '(0.0, 10.0, 1000)\n', (618, 635), True, 'import numpy as np\n'), ((644, 667), 'numpy.array', 'np.array', (['[0.0005, 0.2]'], {}), '([0.0005, 0.2])\n', (652, 667), True, 'import numpy as np\n'), ((689, 715), 'scipy.integrate.odeint', 'odeint', (['deriv', 'yinit', 'time'], {}), '(deriv, yinit, time)\n', (695, 715), False, 'from scipy.integrate import odeint\n'), ((717, 729), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (727, 729), True, 'import matplotlib.pyplot as plt\n'), ((764, 805), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'y[:, 0]'], {'color': '"""deeppink"""'}), "(time, y[:, 0], color='deeppink')\n", (772, 805), True, 'import matplotlib.pyplot as plt\n'), ((806, 821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (816, 821), True, 'import matplotlib.pyplot as plt\n'), ((822, 837), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (832, 837), True, 'import matplotlib.pyplot as plt\n'), ((838, 848), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (846, 848), True, 'import matplotlib.pyplot as plt\n'), ((512, 552), 'numpy.array', 'np.array', (['[y[1], -(a * y[0] + b * y[1])]'], {}), '([y[1], -(a * y[0] + b * y[1])])\n', (520, 552), True, 'import numpy as np\n')] |
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# New record to append to the census data (same column layout as the CSV:
# age, education, race, ..., hours-per-week, income flag).
new_record = [[50, 9, 4, 1, 0, 0, 40, 0]]

# Reading file (``path`` is expected to be defined by the calling environment).
data = np.genfromtxt(path, delimiter=",", skip_header=1)

# Code starts here
# analyzing age distribution
census = np.concatenate([data, new_record])
print(census.shape)

age = census[:, 0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
print(max_age)
print(min_age)
print(age_mean)
print(age_std)

# identifying minority race: count each race code 0..4 with a vectorized
# comparison instead of the original five hand-rolled append-to-list loops
# (same counts, one pass per code, no throwaway lists).
race = census[:, 2]
len_0, len_1, len_2, len_3, len_4 = (int(np.sum(race == r)) for r in range(5))
len_race = [len_0, len_1, len_2, len_3, len_4]
minority_race = len_race.index(min(len_race))
print(minority_race)

# subsetting and analyzing senior citizens' working hours
senior_citizens = census[census[:, 0] > 60]
working_hours_sum = np.sum(senior_citizens[:, 6])
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)

# analyzing pay according to their education
high = census[census[:, 1] > 10]
low = census[census[:, 1] <= 10]
avg_pay_high = np.mean(high[:, 7])
avg_pay_low = np.mean(low[:, 7])
print(avg_pay_high)
print(avg_pay_low)
| [
"numpy.sum",
"warnings.filterwarnings",
"numpy.std",
"numpy.genfromtxt",
"numpy.max",
"numpy.min",
"numpy.mean",
"numpy.concatenate"
] | [((82, 115), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (105, 115), False, 'import warnings\n'), ((203, 252), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (216, 252), True, 'import numpy as np\n'), ((313, 347), 'numpy.concatenate', 'np.concatenate', (['[data, new_record]'], {}), '([data, new_record])\n', (327, 347), True, 'import numpy as np\n'), ((400, 411), 'numpy.max', 'np.max', (['age'], {}), '(age)\n', (406, 411), True, 'import numpy as np\n'), ((423, 434), 'numpy.min', 'np.min', (['age'], {}), '(age)\n', (429, 434), True, 'import numpy as np\n'), ((447, 459), 'numpy.mean', 'np.mean', (['age'], {}), '(age)\n', (454, 459), True, 'import numpy as np\n'), ((471, 482), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (477, 482), True, 'import numpy as np\n'), ((1223, 1252), 'numpy.sum', 'np.sum', (['senior_citizens[:, 6]'], {}), '(senior_citizens[:, 6])\n', (1229, 1252), True, 'import numpy as np\n'), ((1507, 1526), 'numpy.mean', 'np.mean', (['high[:, 7]'], {}), '(high[:, 7])\n', (1514, 1526), True, 'import numpy as np\n'), ((1541, 1559), 'numpy.mean', 'np.mean', (['low[:, 7]'], {}), '(low[:, 7])\n', (1548, 1559), True, 'import numpy as np\n'), ((1060, 1076), 'numpy.min', 'np.min', (['len_race'], {}), '(len_race)\n', (1066, 1076), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
###########################################################################################
# Implementation of CURE (Clustering Using Representatives) Clustering Algorithm
# Author for codes: <NAME>(<EMAIL>)
# Paper: https://www.sciencedirect.com/science/article/pii/S0306437901000084
# Reference: https://github.com/Kchu/CURE-cluster-python
###########################################################################################
import numpy as np
import scipy.spatial.distance as distance
import sys
# Returns the distance between two vectors
def dist(vecA, vecB):
    """Return the Euclidean distance between vectors *vecA* and *vecB*.

    Uses numpy's norm instead of the hand-rolled sqrt(sum(diff**2));
    the result is numerically identical for 1-D inputs.
    """
    return np.linalg.norm(np.asarray(vecA) - np.asarray(vecB))
# This class describes the data structure and method of operation for CURE clustering.
class CureCluster:
    """One CURE cluster: its member points, centroid, shrunken
    representative points, and the members' indices in the original data."""

    def __init__(self, id__, center__):
        # A singleton cluster: the lone point serves as member set, centroid
        # and (only) representative at once.
        self.points = center__
        self.repPoints = center__
        self.center = center__
        self.index = [id__]

    def __repr__(self):
        return "Cluster " + " Size: " + str(len(self.points))

    # Computes and stores the centroid of this cluster, based on its points
    def computeCentroid(self, clust):
        """Set self.center to the size-weighted mean of both centroids."""
        totalPoints_1 = len(self.index)
        totalPoints_2 = len(clust.index)
        self.center = (self.center*totalPoints_1 + clust.center*totalPoints_2) / (totalPoints_1 + totalPoints_2)

    # Computes and stores representative points for this cluster
    def generateRepPoints(self, numRepPoints, alpha):
        """Pick up to *numRepPoints* well-scattered members and shrink each
        towards the centroid by fraction *alpha*; store them in repPoints."""
        tempSet = None
        for i in range(1, numRepPoints+1):
            maxDist = 0
            maxPoint = None
            # Greedy farthest-point pass: choose the member farthest from
            # the already-selected set (or from the centroid on pass 1).
            # NOTE(review): pdist(X) here includes distances *within*
            # tempSet as well, so minDist may not be the pure
            # point-to-selected-set distance — verify against the paper.
            for p in range(0, len(self.index)):
                if i == 1:
                    minDist = dist(self.points[p,:], self.center)
                else:
                    X = np.vstack([tempSet, self.points[p, :]])
                    tmpDist = distance.pdist(X)
                    minDist = tmpDist.min()
                if minDist >= maxDist:
                    maxDist = minDist
                    maxPoint = self.points[p,:]
            if tempSet is None:
                tempSet = maxPoint
            else:
                tempSet = np.vstack((tempSet, maxPoint))
        # Shrink every scattered point towards the centroid by alpha.
        for j in range(len(tempSet)):
            if self.repPoints is None:
                self.repPoints = tempSet[j,:] + alpha * (self.center - tempSet[j,:])
            else:
                self.repPoints = np.vstack((self.repPoints, tempSet[j,:] + alpha * (self.center - tempSet[j,:])))

    # Computes and stores distance between this cluster and the other one.
    def distRep(self, clust):
        """Return the smallest distance found between representatives of
        this cluster and of *clust*."""
        distRep = float('inf')
        for repA in self.repPoints:
            # NOTE(review): for a 2-D numpy repPoints array, repPoints[0]
            # is an ndarray (never a list), so this first branch compares
            # repA against the *whole* array at once — confirm this is the
            # intended single-representative shortcut.
            if type(clust.repPoints[0]) != list:
                repB = clust.repPoints
                distTemp = dist(repA, repB)
                if distTemp < distRep:
                    distRep = distTemp
            else:
                for repB in clust.repPoints:
                    distTemp = dist(repA, repB)
                    if distTemp < distRep:
                        distRep = distTemp
        return distRep

    # Merges this cluster with the given cluster, recomputing the centroid and the representative points.
    def mergeWithCluster(self, clust, numRepPoints, alpha):
        """Absorb *clust* into this cluster and refresh centroid and reps."""
        self.computeCentroid(clust)
        self.points = np.vstack((self.points, clust.points))
        self.index = np.append(self.index, clust.index)
        self.repPoints = None
        self.generateRepPoints(numRepPoints, alpha)
# Describe the process of the CURE algorithm
def runCURE(data, numRepPoints, alpha, numDesCluster):
    """Run CURE agglomerative clustering on *data* (one sample per row).

    Starts from singleton clusters and repeatedly merges the closest pair
    until *numDesCluster* clusters remain. Returns a list with a 1-based
    cluster label for every input sample.
    """
    # Initialization
    Clusters = []
    numCluster = len(data)
    numPts = len(data)
    # Pairwise distance matrix; only the strict lower triangle is filled,
    # everything else stays +inf so np.min ignores it.
    distCluster = np.ones([len(data), len(data)])
    distCluster = distCluster * float('inf')
    for idPoint in range(len(data)):
        newClust = CureCluster(idPoint, data[idPoint,:])
        Clusters.append(newClust)
    for row in range(0, numPts):
        for col in range(0, row):
            distCluster[row][col] = dist(Clusters[row].center, Clusters[col].center)
    # Agglomeration loop: merge the two closest clusters per iteration.
    while numCluster > numDesCluster:
        if np.mod(numCluster, 50) == 0:
            print('Cluster count:', numCluster)
        # Find a pair of closet clusters
        minIndex = np.where(distCluster == np.min(distCluster))
        minIndex1 = minIndex[0][0]
        minIndex2 = minIndex[1][0]
        # Merge
        Clusters[minIndex1].mergeWithCluster(Clusters[minIndex2], numRepPoints, alpha)
        # Update the distCluster matrix
        # (recompute every distance involving the surviving cluster).
        for i in range(0, minIndex1):
            distCluster[minIndex1, i] = Clusters[minIndex1].distRep(Clusters[i])
        for i in range(minIndex1+1, numCluster):
            distCluster[i, minIndex1] = Clusters[minIndex1].distRep(Clusters[i])
        # Delete the merged cluster and its disCluster vector.
        distCluster = np.delete(distCluster, minIndex2, axis=0)
        distCluster = np.delete(distCluster, minIndex2, axis=1)
        del Clusters[minIndex2]
        numCluster = numCluster - 1
    print('Cluster count:', numCluster)
    # Generate sample labels
    Label = [0] * numPts
    for i in range(0, len(Clusters)):
        for j in range(0, len(Clusters[i].index)):
            Label[Clusters[i].index[j]] = i + 1
    return Label
| [
"numpy.power",
"numpy.mod",
"numpy.append",
"numpy.min",
"scipy.spatial.distance.pdist",
"numpy.delete",
"numpy.vstack"
] | [((3279, 3317), 'numpy.vstack', 'np.vstack', (['(self.points, clust.points)'], {}), '((self.points, clust.points))\n', (3288, 3317), True, 'import numpy as np\n'), ((3339, 3373), 'numpy.append', 'np.append', (['self.index', 'clust.index'], {}), '(self.index, clust.index)\n', (3348, 3373), True, 'import numpy as np\n'), ((4793, 4834), 'numpy.delete', 'np.delete', (['distCluster', 'minIndex2'], {'axis': '(0)'}), '(distCluster, minIndex2, axis=0)\n', (4802, 4834), True, 'import numpy as np\n'), ((4857, 4898), 'numpy.delete', 'np.delete', (['distCluster', 'minIndex2'], {'axis': '(1)'}), '(distCluster, minIndex2, axis=1)\n', (4866, 4898), True, 'import numpy as np\n'), ((4062, 4084), 'numpy.mod', 'np.mod', (['numCluster', '(50)'], {}), '(numCluster, 50)\n', (4068, 4084), True, 'import numpy as np\n'), ((618, 642), 'numpy.power', 'np.power', (['(vecA - vecB)', '(2)'], {}), '(vecA - vecB, 2)\n', (626, 642), True, 'import numpy as np\n'), ((2126, 2156), 'numpy.vstack', 'np.vstack', (['(tempSet, maxPoint)'], {}), '((tempSet, maxPoint))\n', (2135, 2156), True, 'import numpy as np\n'), ((2370, 2456), 'numpy.vstack', 'np.vstack', (['(self.repPoints, tempSet[j, :] + alpha * (self.center - tempSet[j, :]))'], {}), '((self.repPoints, tempSet[j, :] + alpha * (self.center - tempSet[j,\n :])))\n', (2379, 2456), True, 'import numpy as np\n'), ((4224, 4243), 'numpy.min', 'np.min', (['distCluster'], {}), '(distCluster)\n', (4230, 4243), True, 'import numpy as np\n'), ((1758, 1797), 'numpy.vstack', 'np.vstack', (['[tempSet, self.points[p, :]]'], {}), '([tempSet, self.points[p, :]])\n', (1767, 1797), True, 'import numpy as np\n'), ((1828, 1845), 'scipy.spatial.distance.pdist', 'distance.pdist', (['X'], {}), '(X)\n', (1842, 1845), True, 'import scipy.spatial.distance as distance\n')] |
# Taken from
# https://sourceware.org/ml/gdb/2013-04/msg00104.html
import gdb
import cv2.cv as cv
import numpy as np
class PcvCommand(gdb.Command):
def __init__(self):
super(PcvCommand, self).__init__("pcv",
gdb.COMMAND_DATA,
gdb.COMPLETE_SYMBOL)
def invoke(self, arg, from_tty):
args = gdb.string_to_argv(arg)
# generally, we type "plot someimage" in the GDB commandline
# where "someimage" is an instance of cv::Mat
v = gdb.parse_and_eval(args[0])
# the value v is a gdb.Value object of C++
# code's cv::Mat, we need to translate to
# a python object under cv2.cv
image_size = (v['cols'],v['rows'])
# print v
# these two below lines do not work. I don't know why
# channel = gdb.execute("call "+ args[0] + ".channels()", False, True)
# channel = v.channels();
CV_8U =0
CV_8S =1
CV_16U=2
CV_16S=3
CV_32S=4
CV_32F=5
CV_64F=6
CV_USRTYPE1=7
CV_CN_MAX = 512
CV_CN_SHIFT = 3
CV_MAT_CN_MASK = (CV_CN_MAX - 1) << CV_CN_SHIFT
flags = v['flags']
channel = (((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1
CV_DEPTH_MAX = (1 << CV_CN_SHIFT)
CV_MAT_DEPTH_MASK = CV_DEPTH_MAX - 1
depth = (flags) & CV_MAT_DEPTH_MASK
IPL_DEPTH_SIGN = 0x80000000
cv_elem_size = (((4<<28)|0x8442211) >> depth*4) & 15
if (depth == CV_8S or depth == CV_16S or depth == CV_32S):
mask = IPL_DEPTH_SIGN
else:
mask = 0
ipl_depth = cv_elem_size*8 | mask
img = cv.CreateImageHeader(image_size, ipl_depth, channel)
# conver the v['data'] type to "char*" type
char_type = gdb.lookup_type("char")
char_pointer_type =char_type.pointer()
buffer = v['data'].cast(char_pointer_type)
# read bytes from inferior's memory, because
# we run the opencv-python module in GDB's own process
# otherwise, we use memory corss processes
buf = v['step']['buf']
bytes = buf[0] * v['rows'] # buf[0] is the step? Not quite sure.
inferior = gdb.selected_inferior()
mem = inferior.read_memory(buffer, bytes)
# set the img's raw data
cv.SetData(img, mem)
mat = np.asarray(img[:,:])
print ("Type: {}".format(mat.dtype))
print (mat)
PcvCommand()
# I(3)
# {flags = 1124024324, dims = 2, rows = 3, cols = 3, data = 0x6087d0 "\001", refcount = 0x6087f4, datastart = 0x6087d0 "\001", dataend = 0x6087f4 "\002", datalimit = 0x6087f4 "\002", allocator = 0x0, size = {p = 0x7fffffffd688}, step = {p = 0x7fffffffd6d0, buf = {12, 4}}}
# [0, 1, 0]
# {flags = 1124057092, dims = 2, rows = 1, cols = 3, data = 0x6087dc "", refcount = 0x6087f4, datastart = 0x6087d0 "\001", dataend = 0x6087f4 "\002", datalimit = 0x6087f4 "\002", allocator = 0x0, size = {p = 0x7fffffffd6e8}, step = {p = 0x7fffffffd730, buf = {12, 4}}} | [
"gdb.lookup_type",
"numpy.asarray",
"cv2.cv.SetData",
"gdb.selected_inferior",
"gdb.string_to_argv",
"gdb.parse_and_eval",
"cv2.cv.CreateImageHeader"
] | [((404, 427), 'gdb.string_to_argv', 'gdb.string_to_argv', (['arg'], {}), '(arg)\n', (422, 427), False, 'import gdb\n'), ((581, 608), 'gdb.parse_and_eval', 'gdb.parse_and_eval', (['args[0]'], {}), '(args[0])\n', (599, 608), False, 'import gdb\n'), ((1766, 1818), 'cv2.cv.CreateImageHeader', 'cv.CreateImageHeader', (['image_size', 'ipl_depth', 'channel'], {}), '(image_size, ipl_depth, channel)\n', (1786, 1818), True, 'import cv2.cv as cv\n'), ((1900, 1923), 'gdb.lookup_type', 'gdb.lookup_type', (['"""char"""'], {}), "('char')\n", (1915, 1923), False, 'import gdb\n'), ((2329, 2352), 'gdb.selected_inferior', 'gdb.selected_inferior', ([], {}), '()\n', (2350, 2352), False, 'import gdb\n'), ((2453, 2473), 'cv2.cv.SetData', 'cv.SetData', (['img', 'mem'], {}), '(img, mem)\n', (2463, 2473), True, 'import cv2.cv as cv\n'), ((2488, 2509), 'numpy.asarray', 'np.asarray', (['img[:, :]'], {}), '(img[:, :])\n', (2498, 2509), True, 'import numpy as np\n')] |
import numpy as np
from ._ReadDataIndex import _ReadDataIndex
from ._UpdateDataIndex import _UpdateDataIndex
import os
def _DeleteDate(Date,fname,path,Confirm=True):
'''
Delete a single day of data for an instrument
Inputs
======
Date : int
Integer date in format yyyymmdd
fname : str
Full name and path of index file
path : str
Path to data product
Confirm : bool
Confirm whether to delete each file before deleting
'''
#read the index
idx = _ReadDataIndex(fname)
#find the indices where the dates match the date to be deleted
idel = np.where(idx.Date == Date)[0]
if idel.size == 0:
print('No files found for the date {:d}'.format(Date))
return
#loop through each on deleting the files
ndel = idel.size
removed = np.zeros(idx.size,dtype='bool')
for i in range(0,ndel):
inpt = 'y'
if Confirm:
inpt = input('Delete the file {:s}? (y/n):\n'.format(idx.FileName[idel[i]]))
if inpt:
os.system('rm -v '+path+idx.FileName[idel[i]])
removed[idel[i]] = True
#keep the remaining entries
ikeep = np.where(removed == False)[0]
idx = idx[ikeep]
#update index file
_UpdateDataIndex(idx,fname)
| [
"numpy.where",
"numpy.zeros",
"os.system"
] | [((757, 789), 'numpy.zeros', 'np.zeros', (['idx.size'], {'dtype': '"""bool"""'}), "(idx.size, dtype='bool')\n", (765, 789), True, 'import numpy as np\n'), ((567, 593), 'numpy.where', 'np.where', (['(idx.Date == Date)'], {}), '(idx.Date == Date)\n', (575, 593), True, 'import numpy as np\n'), ((1051, 1077), 'numpy.where', 'np.where', (['(removed == False)'], {}), '(removed == False)\n', (1059, 1077), True, 'import numpy as np\n'), ((935, 985), 'os.system', 'os.system', (["('rm -v ' + path + idx.FileName[idel[i]])"], {}), "('rm -v ' + path + idx.FileName[idel[i]])\n", (944, 985), False, 'import os\n')] |
import time
# Start Time
start_time = time.time()
from tensorflow.keras.models import load_model
from time import sleep
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
import cv2
import numpy as np
import os
from mtcnn import MTCNN
num_classes = 5 # we have 5 kinds of emotions
img_rows, img_cols = 48, 48
# Dataset Path
# test_data_dir = os.path.join("data","test")
test_data_dir = os.path.join("Flickr","yo")
model_name = input("\n\nEnter the model name : ")
model_name = model_name + '.h5'
print("-------------------Loading the model------------------------------")
model_path = os.path.join("model",model_name)
classifier = load_model(model_path)
print("-------------------Model Loaded Succesfully------------------------------")
class_labels = ['Angry','Happy','Neutral','Sad','Surprise'] # Remember to keep in alphabetical order
# class_labels = ['Angry','Happy','Sad'] # Remember to keep in alphabetical order
def load_images_from_folder(folder):
images = []
for filename in os.listdir(folder):
img = cv2.imread(os.path.join(folder,filename))
if img is not None:
images.append(img)
return images
# Count array to keep track of all correct predictions
count = [0 for i in range(num_classes)]
# Total count array
tot_count = [0 for i in range(num_classes)]
for emotion in range(num_classes):
print("The current emotion is : ", class_labels[emotion])
# Getting the images
# path = os.path.join("data","test", class_labels[emotion])
path = os.path.join("Flickr","yo", class_labels[emotion])
image_list = load_images_from_folder(path)
# Setting the total count and initial count
tot_count[emotion] = len(image_list)
correct_pred = 0
for img in image_list:
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
roi_gray = cv2.resize(gray,(48,48),interpolation=cv2.INTER_AREA)
roi = roi_gray.astype('float')/255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi,axis=0)
# Getting the prediction for an image
cur_prediction = classifier.predict(roi)[0]
cur_prediction = class_labels[list(cur_prediction).index(max(cur_prediction))]
# print("Current Prediction : ", cur_prediction )
# print("True Prediction : ", class_labels[emotion])
# if(class_labels[emotion] != 'Sad'):
if(cur_prediction == class_labels[emotion]):
count[emotion] += 1
# else:
# if(cur_prediction == class_labels[emotion] or cur_prediction == 'Neutral'):
# count[emotion] += 1
print("\nTesting Summary for the model : ", model_name)
for i in range(num_classes):
print(class_labels[i], " : ", (count[i]/tot_count[i])*100)
print("\nTotal Accuracy : ")
print((sum(count)/sum(tot_count))*100) | [
"tensorflow.keras.models.load_model",
"cv2.cvtColor",
"numpy.expand_dims",
"time.time",
"keras.preprocessing.image.img_to_array",
"os.path.join",
"os.listdir",
"cv2.resize"
] | [((39, 50), 'time.time', 'time.time', ([], {}), '()\n', (48, 50), False, 'import time\n'), ((433, 461), 'os.path.join', 'os.path.join', (['"""Flickr"""', '"""yo"""'], {}), "('Flickr', 'yo')\n", (445, 461), False, 'import os\n'), ((635, 668), 'os.path.join', 'os.path.join', (['"""model"""', 'model_name'], {}), "('model', model_name)\n", (647, 668), False, 'import os\n'), ((681, 703), 'tensorflow.keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (691, 703), False, 'from tensorflow.keras.models import load_model\n'), ((1046, 1064), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1056, 1064), False, 'import os\n'), ((1548, 1599), 'os.path.join', 'os.path.join', (['"""Flickr"""', '"""yo"""', 'class_labels[emotion]'], {}), "('Flickr', 'yo', class_labels[emotion])\n", (1560, 1599), False, 'import os\n'), ((1781, 1818), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1793, 1818), False, 'import cv2\n'), ((1831, 1887), 'cv2.resize', 'cv2.resize', (['gray', '(48, 48)'], {'interpolation': 'cv2.INTER_AREA'}), '(gray, (48, 48), interpolation=cv2.INTER_AREA)\n', (1841, 1887), False, 'import cv2\n'), ((1932, 1949), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['roi'], {}), '(roi)\n', (1944, 1949), False, 'from keras.preprocessing.image import img_to_array\n'), ((1958, 1985), 'numpy.expand_dims', 'np.expand_dims', (['roi'], {'axis': '(0)'}), '(roi, axis=0)\n', (1972, 1985), True, 'import numpy as np\n'), ((1091, 1121), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (1103, 1121), False, 'import os\n')] |
import imutils
import numpy as np
import argparse
import imutils
import dlib
import cv2
from collections import OrderedDict
def rect_to_bb(rect):
x = rect.left()
y = rect.top()
w = rect.right() - x
h = rect.bottom() - y
return (x, y, w, h)
def shape_to_np(shape, dtype="int"):
coords = np.zeros((shape.num_parts, 2), dtype=dtype)
for i in range(0, shape.num_parts):
coords[i] = (shape.part(i).x, shape.part(i).y)
return coords
FACIAL_LANDMARKS_68_IDXS = OrderedDict([
("mouth", (48, 68)),
("inner_mouth", (60, 68)),
("right_eyebrow", (17, 22)),
("left_eyebrow", (22, 27)),
("right_eye", (36, 42)),
("left_eye", (42, 48)),
("nose", (27, 36)),
("jaw", (0, 17))
])
FACIAL_LANDMARKS_5_IDXS = OrderedDict([
("right_eye", (2, 3)),
("left_eye", (0, 1)),
("nose", (4))
])
FACIAL_LANDMARKS_IDXS = FACIAL_LANDMARKS_68_IDXS
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
ap.add_argument("-i", "--image", required=True,
help="path to input image")
args = vars(ap.parse_args())
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
image = cv2.imread('sara.jpg',1)
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 1)
for (i, rect) in enumerate(rects):
shape = predictor(gray, rect)
shape = shape_to_np(shape)
(x, y, w, h) = rect_to_bb(rect)
# cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
# cv2.putText(image, "Face #{}".format(i + 1), (x - 10, y - 10),
#cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
for (x, y) in shape:
cv2.circle(image, (x, y), 1, (0, 0, 255), -1)
cv2.imshow("Output", image)
cv2.waitKey(0)
| [
"cv2.circle",
"argparse.ArgumentParser",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"numpy.zeros",
"cv2.imread",
"dlib.get_frontal_face_detector",
"collections.OrderedDict",
"dlib.shape_predictor",
"imutils.resize"
] | [((485, 700), 'collections.OrderedDict', 'OrderedDict', (["[('mouth', (48, 68)), ('inner_mouth', (60, 68)), ('right_eyebrow', (17, 22)\n ), ('left_eyebrow', (22, 27)), ('right_eye', (36, 42)), ('left_eye', (\n 42, 48)), ('nose', (27, 36)), ('jaw', (0, 17))]"], {}), "([('mouth', (48, 68)), ('inner_mouth', (60, 68)), (\n 'right_eyebrow', (17, 22)), ('left_eyebrow', (22, 27)), ('right_eye', (\n 36, 42)), ('left_eye', (42, 48)), ('nose', (27, 36)), ('jaw', (0, 17))])\n", (496, 700), False, 'from collections import OrderedDict\n'), ((728, 799), 'collections.OrderedDict', 'OrderedDict', (["[('right_eye', (2, 3)), ('left_eye', (0, 1)), ('nose', 4)]"], {}), "([('right_eye', (2, 3)), ('left_eye', (0, 1)), ('nose', 4)])\n", (739, 799), False, 'from collections import OrderedDict\n'), ((863, 888), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (886, 888), False, 'import argparse\n'), ((1108, 1140), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1138, 1140), False, 'import dlib\n'), ((1153, 1198), 'dlib.shape_predictor', 'dlib.shape_predictor', (["args['shape_predictor']"], {}), "(args['shape_predictor'])\n", (1173, 1198), False, 'import dlib\n'), ((1208, 1233), 'cv2.imread', 'cv2.imread', (['"""sara.jpg"""', '(1)'], {}), "('sara.jpg', 1)\n", (1218, 1233), False, 'import cv2\n'), ((1241, 1273), 'imutils.resize', 'imutils.resize', (['image'], {'width': '(500)'}), '(image, width=500)\n', (1255, 1273), False, 'import imutils\n'), ((1281, 1320), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1293, 1320), False, 'import cv2\n'), ((1748, 1775), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'image'], {}), "('Output', image)\n", (1758, 1775), False, 'import cv2\n'), ((1776, 1790), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1787, 1790), False, 'import cv2\n'), ((301, 344), 'numpy.zeros', 'np.zeros', (['(shape.num_parts, 2)'], {'dtype': 'dtype'}), 
'((shape.num_parts, 2), dtype=dtype)\n', (309, 344), True, 'import numpy as np\n'), ((1702, 1747), 'cv2.circle', 'cv2.circle', (['image', '(x, y)', '(1)', '(0, 0, 255)', '(-1)'], {}), '(image, (x, y), 1, (0, 0, 255), -1)\n', (1712, 1747), False, 'import cv2\n')] |
from typing import List, Optional
import numpy as np
import copy
from ..classification import ClassificationAttacker, Classifier, ClassifierGoal
from ...text_process.tokenizer import Tokenizer, get_default_tokenizer
from ...attack_assist.substitute.word import WordSubstitute, get_default_substitute
from ...utils import get_language, check_language, language_by_name
from ...exceptions import WordNotInDictionaryException
from ...tags import Tag
from ...attack_assist.filter_words import get_default_filter_words
class PSOAttacker(ClassificationAttacker):
@property
def TAGS(self):
return { self.__lang_tag, Tag("get_pred", "victim"), Tag("get_prob", "victim")}
def __init__(self,
pop_size : int = 20,
max_iters : int = 20,
tokenizer : Optional[Tokenizer] = None,
substitute : Optional[WordSubstitute] = None,
filter_words : List[str] = None,
lang = None
):
"""
Word-level Textual Adversarial Attacking as Combinatorial Optimization. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. ACL 2020.
`[pdf] <https://www.aclweb.org/anthology/2020.acl-main.540.pdf>`__
`[code] <https://github.com/thunlp/SememePSO-Attack>`__
Args:
pop_size: Genetic algorithm popluation size. **Default:** 20
max_iter: Maximum generations of pso algorithm. **Default:** 20
tokenizer: A tokenizer that will be used during the attack procedure. Must be an instance of :py:class:`.Tokenizer`
substitute: A substitute that will be used during the attack procedure. Must be an instance of :py:class:`.WordSubstitute`
lang: The language used in attacker. If is `None` then `attacker` will intelligently select the language based on other parameters.
filter_words: A list of words that will be preserved in the attack procesudre.
:Classifier Capacity:
* get_pred
* get_prob
"""
lst = []
if tokenizer is not None:
lst.append(tokenizer)
if substitute is not None:
lst.append(substitute)
if len(lst) > 0:
self.__lang_tag = get_language(lst)
else:
self.__lang_tag = language_by_name(lang)
if self.__lang_tag is None:
raise ValueError("Unknown language `%s`" % lang)
if substitute is None:
substitute = get_default_substitute(self.__lang_tag)
self.substitute = substitute
if tokenizer is None:
tokenizer = get_default_tokenizer(self.__lang_tag)
self.tokenizer = tokenizer
self.pop_size = pop_size
self.max_iters = max_iters
if filter_words is None:
filter_words = get_default_filter_words(self.__lang_tag)
self.filter_words = set(filter_words)
check_language([self.tokenizer, self.substitute], self.__lang_tag)
def attack(self, victim: Classifier, sentence, goal: ClassifierGoal):
self.invoke_dict = {}
x_orig = sentence.lower()
x_orig = self.tokenizer.tokenize(x_orig)
x_pos = list(map(lambda x: x[1], x_orig))
x_orig = list(map(lambda x: x[0], x_orig))
x_len = len(x_orig)
neighbours_nums = [
min(self.get_neighbour_num(word, pos),10) if word not in self.filter_words else 0
for word, pos in zip(x_orig, x_pos)
]
neighbours = [
self.get_neighbours(word, pos)
if word not in self.filter_words
else []
for word, pos in zip(x_orig, x_pos)
]
if np.sum(neighbours_nums) == 0:
return None
w_select_probs = neighbours_nums / np.sum(neighbours_nums)
pop = self.generate_population(x_orig, neighbours, w_select_probs, x_len)
part_elites = pop
if goal.targeted:
all_elite_score = 100
part_elites_scores = [100 for _ in range(self.pop_size)]
else:
all_elite_score = -1
part_elites_scores = [-1 for _ in range(self.pop_size)]
all_elite = pop[0]
Omega_1 = 0.8
Omega_2 = 0.2
C1_origin = 0.8
C2_origin = 0.2
V = [np.random.uniform(-3, 3) for _ in range(self.pop_size)]
V_P = [[V[t] for _ in range(x_len)] for t in range(self.pop_size)]
for i in range(self.max_iters):
pop_preds = self.predict_batch(victim, pop)
pop_scores = pop_preds[:, goal.target]
if goal.targeted:
pop_ranks = np.argsort(pop_scores)[::-1]
top_attack = pop_ranks[0]
if np.max(pop_scores) > all_elite_score:
all_elite = pop[top_attack]
all_elite_score = np.max(pop_scores)
for k in range(self.pop_size):
if pop_scores[k] > part_elites_scores[k]:
part_elites[k] = pop[k]
part_elites_scores[k] = pop_scores[k]
if np.argmax(pop_preds[top_attack, :]) == goal.target:
return self.tokenizer.detokenize(pop[top_attack])
else:
pop_ranks = np.argsort(pop_scores)
top_attack = pop_ranks[0]
if np.min(pop_scores) < all_elite_score:
all_elite = pop[top_attack]
all_elite_score = np.min(pop_scores)
for k in range(self.pop_size):
if pop_scores[k] < part_elites_scores[k]:
part_elites[k] = pop[k]
part_elites_scores[k] = pop_scores[k]
if np.argmax(pop_preds[top_attack, :]) != goal.target:
return self.tokenizer.detokenize(pop[top_attack])
Omega = (Omega_1 - Omega_2) * (self.max_iters - i) / self.max_iters + Omega_2
C1 = C1_origin - i / self.max_iters * (C1_origin - C2_origin)
C2 = C2_origin + i / self.max_iters * (C1_origin - C2_origin)
for id in range(self.pop_size):
for dim in range(x_len):
V_P[id][dim] = Omega * V_P[id][dim] + (1 - Omega) * (
self.equal(pop[id][dim], part_elites[id][dim]) + self.equal(pop[id][dim],
all_elite[dim]))
turn_prob = [self.sigmod(V_P[id][d]) for d in range(x_len)]
P1 = C1
P2 = C2
if np.random.uniform() < P1:
pop[id] = self.turn(part_elites[id], pop[id], turn_prob, x_len)
if np.random.uniform() < P2:
pop[id] = self.turn(all_elite, pop[id], turn_prob, x_len)
pop_preds = self.predict_batch(victim, pop)
pop_scores = pop_preds[:, goal.target]
if goal.targeted:
pop_ranks = np.argsort(pop_scores)[::-1]
top_attack = pop_ranks[0]
if np.max(pop_scores) > all_elite_score:
all_elite = pop[top_attack]
all_elite_score = np.max(pop_scores)
for k in range(self.pop_size):
if pop_scores[k] > part_elites_scores[k]:
part_elites[k] = pop[k]
part_elites_scores[k] = pop_scores[k]
if np.argmax(pop_preds[top_attack, :]) == goal.target:
return self.tokenizer.detokenize( pop[top_attack] )
else:
pop_ranks = np.argsort(pop_scores)
top_attack = pop_ranks[0]
if np.min(pop_scores) < all_elite_score:
all_elite = pop[top_attack]
all_elite_score = np.min(pop_scores)
for k in range(self.pop_size):
if pop_scores[k] < part_elites_scores[k]:
part_elites[k] = pop[k]
part_elites_scores[k] = pop_scores[k]
if np.argmax(pop_preds[top_attack, :]) != goal.target:
return self.tokenizer.detokenize( pop[top_attack] )
new_pop = []
for x in pop:
change_ratio = self.count_change_ratio(x, x_orig, x_len)
p_change = 1 - 2 * change_ratio
if np.random.uniform() < p_change:
tem = self.mutate( x, x_orig, neighbours, w_select_probs)
new_pop.append(tem)
else:
new_pop.append(x)
pop = new_pop
return None #Failed
def predict_batch(self, victim, sentences):
return np.array([self.predict(victim, s) for s in sentences])
def predict(self, victim, sentence):
if tuple(sentence) in self.invoke_dict:
return self.invoke_dict[tuple(sentence)]
tem = victim.get_prob(self.make_batch([sentence]))[0]
self.invoke_dict[tuple(sentence)] = tem
return tem
def do_replace(self, x_cur, pos, new_word):
x_new = x_cur.copy()
x_new[pos] = new_word
return x_new
def generate_population(self, x_orig, neighbours_list, w_select_probs, x_len):
pop = []
x_len = w_select_probs.shape[0]
for i in range(self.pop_size):
r = np.random.choice(x_len, 1, p=w_select_probs)[0]
replace_list = neighbours_list[r]
sub = np.random.choice(replace_list, 1)[0]
tem = self.do_replace(x_orig, r, sub)
pop.append(tem)
return pop
def turn(self, x1, x2, prob, x_len):
x_new = copy.deepcopy(x2)
for i in range(x_len):
if np.random.uniform() < prob[i]:
x_new[i] = x1[i]
return x_new
def mutate(self, x, x_orig, neigbhours_list, w_select_probs):
x_len = w_select_probs.shape[0]
rand_idx = np.random.choice(x_len, 1,p=w_select_probs)[0]
while x[rand_idx] != x_orig[rand_idx] and self.sum_diff(x_orig,x) < np.sum(np.sign(w_select_probs)):
rand_idx = np.random.choice(x_len, 1,p=w_select_probs)[0]
replace_list = neigbhours_list[rand_idx]
sub_idx= np.random.choice(len(replace_list), 1)[0]
new_x=copy.deepcopy(x)
new_x[rand_idx]=replace_list[sub_idx]
return new_x
def sum_diff(self, x_orig, x_cur):
ret = 0
for wa, wb in zip(x_orig, x_cur):
if wa != wb:
ret += 1
return ret
def norm(self, n):
tn = []
for i in n:
if i <= 0:
tn.append(0)
else:
tn.append(i)
s = np.sum(tn)
if s == 0:
for i in range(len(tn)):
tn[i] = 1
return [t / len(tn) for t in tn]
new_n = [t / s for t in tn]
return new_n
def get_neighbour_num(self, word, pos):
try:
return len(self.substitute(word, pos))
except WordNotInDictionaryException:
return 0
def get_neighbours(self, word, pos):
try:
return list(
map(
lambda x: x[0],
self.substitute(word, pos),
)
)
except WordNotInDictionaryException:
return []
def make_batch(self, sents):
return [self.tokenizer.detokenize(sent) for sent in sents]
def equal(self, a, b):
if a == b:
return -3
else:
return 3
def sigmod(self, n):
return 1 / (1 + np.exp(-n))
def count_change_ratio(self, x, x_orig, x_len):
change_ratio = float(np.sum(np.array(x) != np.array(x_orig))) / float(x_len)
return change_ratio
| [
"numpy.random.uniform",
"copy.deepcopy",
"numpy.sum",
"numpy.argmax",
"numpy.sign",
"numpy.argsort",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.exp",
"numpy.random.choice"
] | [((9768, 9785), 'copy.deepcopy', 'copy.deepcopy', (['x2'], {}), '(x2)\n', (9781, 9785), False, 'import copy\n'), ((10391, 10407), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (10404, 10407), False, 'import copy\n'), ((10814, 10824), 'numpy.sum', 'np.sum', (['tn'], {}), '(tn)\n', (10820, 10824), True, 'import numpy as np\n'), ((3713, 3736), 'numpy.sum', 'np.sum', (['neighbours_nums'], {}), '(neighbours_nums)\n', (3719, 3736), True, 'import numpy as np\n'), ((3810, 3833), 'numpy.sum', 'np.sum', (['neighbours_nums'], {}), '(neighbours_nums)\n', (3816, 3833), True, 'import numpy as np\n'), ((4321, 4345), 'numpy.random.uniform', 'np.random.uniform', (['(-3)', '(3)'], {}), '(-3, 3)\n', (4338, 4345), True, 'import numpy as np\n'), ((10043, 10087), 'numpy.random.choice', 'np.random.choice', (['x_len', '(1)'], {'p': 'w_select_probs'}), '(x_len, 1, p=w_select_probs)\n', (10059, 10087), True, 'import numpy as np\n'), ((5297, 5319), 'numpy.argsort', 'np.argsort', (['pop_scores'], {}), '(pop_scores)\n', (5307, 5319), True, 'import numpy as np\n'), ((7697, 7719), 'numpy.argsort', 'np.argsort', (['pop_scores'], {}), '(pop_scores)\n', (7707, 7719), True, 'import numpy as np\n'), ((9463, 9507), 'numpy.random.choice', 'np.random.choice', (['x_len', '(1)'], {'p': 'w_select_probs'}), '(x_len, 1, p=w_select_probs)\n', (9479, 9507), True, 'import numpy as np\n'), ((9575, 9608), 'numpy.random.choice', 'np.random.choice', (['replace_list', '(1)'], {}), '(replace_list, 1)\n', (9591, 9608), True, 'import numpy as np\n'), ((9832, 9851), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (9849, 9851), True, 'import numpy as np\n'), ((10222, 10266), 'numpy.random.choice', 'np.random.choice', (['x_len', '(1)'], {'p': 'w_select_probs'}), '(x_len, 1, p=w_select_probs)\n', (10238, 10266), True, 'import numpy as np\n'), ((11726, 11736), 'numpy.exp', 'np.exp', (['(-n)'], {}), '(-n)\n', (11732, 11736), True, 'import numpy as np\n'), ((4658, 4680), 'numpy.argsort', 
'np.argsort', (['pop_scores'], {}), '(pop_scores)\n', (4668, 4680), True, 'import numpy as np\n'), ((4748, 4766), 'numpy.max', 'np.max', (['pop_scores'], {}), '(pop_scores)\n', (4754, 4766), True, 'import numpy as np\n'), ((4872, 4890), 'numpy.max', 'np.max', (['pop_scores'], {}), '(pop_scores)\n', (4878, 4890), True, 'import numpy as np\n'), ((5129, 5164), 'numpy.argmax', 'np.argmax', (['pop_preds[top_attack, :]'], {}), '(pop_preds[top_attack, :])\n', (5138, 5164), True, 'import numpy as np\n'), ((5381, 5399), 'numpy.min', 'np.min', (['pop_scores'], {}), '(pop_scores)\n', (5387, 5399), True, 'import numpy as np\n'), ((5505, 5523), 'numpy.min', 'np.min', (['pop_scores'], {}), '(pop_scores)\n', (5511, 5523), True, 'import numpy as np\n'), ((5762, 5797), 'numpy.argmax', 'np.argmax', (['pop_preds[top_attack, :]'], {}), '(pop_preds[top_attack, :])\n', (5771, 5797), True, 'import numpy as np\n'), ((6658, 6677), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6675, 6677), True, 'import numpy as np\n'), ((6787, 6806), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6804, 6806), True, 'import numpy as np\n'), ((7056, 7078), 'numpy.argsort', 'np.argsort', (['pop_scores'], {}), '(pop_scores)\n', (7066, 7078), True, 'import numpy as np\n'), ((7146, 7164), 'numpy.max', 'np.max', (['pop_scores'], {}), '(pop_scores)\n', (7152, 7164), True, 'import numpy as np\n'), ((7270, 7288), 'numpy.max', 'np.max', (['pop_scores'], {}), '(pop_scores)\n', (7276, 7288), True, 'import numpy as np\n'), ((7527, 7562), 'numpy.argmax', 'np.argmax', (['pop_preds[top_attack, :]'], {}), '(pop_preds[top_attack, :])\n', (7536, 7562), True, 'import numpy as np\n'), ((7781, 7799), 'numpy.min', 'np.min', (['pop_scores'], {}), '(pop_scores)\n', (7787, 7799), True, 'import numpy as np\n'), ((7905, 7923), 'numpy.min', 'np.min', (['pop_scores'], {}), '(pop_scores)\n', (7911, 7923), True, 'import numpy as np\n'), ((8162, 8197), 'numpy.argmax', 'np.argmax', (['pop_preds[top_attack, 
:]'], {}), '(pop_preds[top_attack, :])\n', (8171, 8197), True, 'import numpy as np\n'), ((8478, 8497), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8495, 8497), True, 'import numpy as np\n'), ((10173, 10196), 'numpy.sign', 'np.sign', (['w_select_probs'], {}), '(w_select_probs)\n', (10180, 10196), True, 'import numpy as np\n'), ((11827, 11838), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (11835, 11838), True, 'import numpy as np\n'), ((11842, 11858), 'numpy.array', 'np.array', (['x_orig'], {}), '(x_orig)\n', (11850, 11858), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@contact:
@Created on: DATE{TIME}
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Public loss classes exported by this module; the later names are defined
# further down the file (beyond this chunk).
__all__ = ['CrossEntropy2D', 'BinaryCrossEntropy2D', 'ImageBasedCrossEntropy2D', 'BoundariesRelaxation2D',
           'LabelSmoothCrossEntropy', 'LabelSmoothCrossEntropy2D']
class BasicLossModule(nn.Module):
    """
    Shared base class for the 2D segmentation losses in this module.

    It only stores configuration and provides class-frequency based weight
    computation; it is not meant to be used as a loss by itself.

    Params:
        ignore_index: int (default 255). Label value in `target` to be ignored
        custom_weight: list, tuple, numpy array or tensor (default None).
            Per-class weight, e.g. [0.2, 0.1, ..., 0.1]. If given,
            `batch_weight` is ignored by subclasses
        batch_weight: bool (default False). If True, subclasses derive class
            weights from the current batch via `calculate_weights`
        size_average: bool (default True). Subclasses divide the loss by (h * w)
        batch_average: bool (default True). Subclasses divide the loss by (n)
        upper_bound: float (default 1.0). Scale of the rarity bonus applied in
            `calculate_weights`
    """

    def __init__(self, ignore_index=255, custom_weight=None, batch_weight=False,
                 size_average=True, batch_average=True, upper_bound=1.0):
        super(BasicLossModule, self).__init__()
        # Normalize `custom_weight` to a float32 tensor. `torch.as_tensor`
        # replaces the former from_numpy/np.array round-trip, gives the same
        # result for lists and numpy arrays, and additionally accepts tuples
        # and tensors.
        if custom_weight is not None:
            custom_weight = torch.as_tensor(custom_weight, dtype=torch.float32)
        # Add to properties
        self.ignore_index = ignore_index
        self.custom_weight = custom_weight
        self.batch_weight = batch_weight
        self.size_average = size_average
        self.batch_average = batch_average
        self.upper_bound = upper_bound

    def calculate_weights(self, target, num_classes):
        """
        Compute per-class weights from the class frequencies in `target`.

        Classes present in the batch receive a weight of
        ``1 + upper_bound * (1 - frequency)`` (rarer classes weigh more);
        absent classes receive a weight of exactly 1.

        Params:
            target: torch.Tensor which shape is (n, h, w). Ground-truth labels
            num_classes: int. The number of classes
        Returns:
            1-D torch.Tensor of length `num_classes` holding the weights
        """
        hist = torch.histc(target, bins=num_classes, min=0, max=num_classes - 1)
        hist = hist / hist.sum()  # per-class frequency
        hist = ((hist != 0) * self.upper_bound * (1 - hist)) + 1
        return hist
class CrossEntropy2D(BasicLossModule):
    """
    The 2D cross entropy loss, note that the module must run on `cuda`.

    Params:
        ignore_index: int (default 255). Label value in `target` to be ignored
        custom_weight: numpy array or list (default None). Per-class weight,
            e.g. [0.2, 0.1, ..., 0.1]; if given, `batch_weight` is ignored
        batch_weight: bool (default True). If True, class weights are computed
            from the whole batch
        size_average: bool (default True). Loss is divided by (h * w)
        batch_average: bool (default True). Loss is divided by (n)

    Forward:
        logit: 4-D torch.Tensor of shape (n, c, h, w). Raw network scores,
            i.e. the output *before* softmax/sigmoid
        target: 3-D torch.Tensor of shape (n, h, w). Ground-truth labels

    `F.cross_entropy` applies log-softmax internally, so do not pass `logit`
    through softmax/sigmoid before calling this module.
    """

    def __init__(self, *args, **kwargs):
        super(CrossEntropy2D, self).__init__(*args, **kwargs)

    def forward(self, logit, target):
        assert len(logit.shape) == 4 and len(target.shape) == 3
        batch, num_classes, height, width = logit.size()
        # Resolve per-class weights: explicit weights win, then batch-derived,
        # otherwise unweighted.
        if self.custom_weight is not None:
            class_weight = self.custom_weight.to(logit.device)
        elif self.batch_weight:
            class_weight = self.calculate_weights(target, num_classes).to(logit.device)
        else:
            class_weight = None
        # Sum-reduce here so the normalization below stays explicit and
        # configurable (`size_average` / `batch_average`).
        loss = F.cross_entropy(logit, target.long(), class_weight,
                               ignore_index=self.ignore_index, reduction='sum')
        if self.size_average:
            loss = loss / (height * width)
        if self.batch_average:
            loss = loss / batch
        return loss
class BinaryCrossEntropy2D(BasicLossModule):
    """
    The binary 2D cross entropy loss, note that the module must run on `cuda`.
    Note that `F.binary_cross_entropy_with_logits` does not support `ignore_index`,
    so ignored pixels are masked out through a zero per-pixel weight instead.
    Params:
        ignore_index: int (default 255). Categories to be ignored of `target`
        custom_weight: numpy array or list (default None). The weight of each category. For example,
            [0.2, 0.8], if `custom_weight` is not None, `batch_weight` will be ignored
        batch_weight: bool (default True). If true, the whole batch is used to calculate weights
        size_average: bool (default True). Loss will be divided by (h * w)
        batch_average: bool (default True). Loss will be divided by (n)
    Forward:
        logit: 3-D or 4-D torch.Tensor. The predict result without `sigmoid/softmax`, if `logit` is
            3-D/4D Tensor, the shape of `logit` should be (n,h,w)/(n,c,h,w) respectively
        target: torch.Tensor. The input target which shape should be same as `logit`
    Note that there's no need to use `softmax/sigmoid` for `logit` before calling this loss module.
    """
    def __init__(self, *args, **kwargs):
        super(BinaryCrossEntropy2D, self).__init__(*args, **kwargs)

    def forward(self, logit, target):
        assert len(logit.shape) == len(target.shape)
        # Get the size of `logit`
        if len(logit.shape) == 4:
            n, c, h, w = logit.size()
        elif len(logit.shape) == 3:
            n, h, w = logit.size()
        else:
            raise AttributeError('Expect `logit` is a 3-D or 4-D Tensor, but {}-D instead'.format(len(logit.shape)))
        # Flatten to shape (1, n*h*w*c) / (1, n*h*w) so the loss becomes a plain
        # element-wise binary cross entropy over every pixel.
        if len(logit.shape) == 4:
            logit_rsp = logit.transpose(1, 2).transpose(2, 3).reshape(1, -1)
            target_rsp = target.transpose(1, 2).transpose(2, 3).reshape(1, -1)
        else:
            logit_rsp = logit.reshape(1, -1)
            target_rsp = target.reshape(1, -1)
        # Positive / negative / ignored pixel masks.
        pos_index = (target_rsp == 1)
        neg_index = (target_rsp == 0)
        # NOTE(review): every label > 1 is treated as "ignore" (not only
        # `self.ignore_index`, see the commented-out original line) -- confirm
        # that no valid label other than {0, 1} can appear in `target`.
        ign_index = (target_rsp > 1)
        # Reset ignored labels to 0 so BCE only ever sees {0, 1} targets;
        # their contribution is removed below via a zero weight.
        target_rsp[ign_index] = 0
        # Convert `positive/negative/ignore index` as `bool`
        pos_index = pos_index.data.cpu().numpy().astype(bool)
        neg_index = neg_index.data.cpu().numpy().astype(bool)
        ign_index = ign_index.data.cpu().numpy().astype(bool)
        # Calculate the per-pixel weight
        weight = torch.Tensor(logit_rsp.size()).fill_(0).numpy()
        if self.custom_weight is not None:
            # BUGFIX: was `self.weight[...]`, but the configured attribute is
            # `self.custom_weight` -- the very attribute checked one line above
            # and the one used by every sibling loss module.
            weight[neg_index] = self.custom_weight[0] * 1.0
            weight[pos_index] = self.custom_weight[1] * 1.0
            weight[ign_index] = 0  # weight for `ignore_index` is 0 !
            weight = torch.from_numpy(weight.astype(np.float32)).to(logit.device)
        else:
            if self.batch_weight:
                # Re-balance the two classes by their relative frequency in the batch.
                pos_num = pos_index.sum()
                neg_num = neg_index.sum()
                sum_num = pos_num + neg_num
                if sum_num != 0:
                    weight[pos_index] = 1 + (neg_num * 1.0 / sum_num) if pos_num > 0 else 1
                    weight[neg_index] = 1 + (pos_num * 1.0 / sum_num) if neg_num > 0 else 1
                    weight[ign_index] = 0  # weight for `ignore_index` is 0 !
                else:
                    raise AttributeError('The sum of `pos_index` and `neg_index` is 0')
                weight = torch.from_numpy(weight.astype(np.float32)).to(logit.device)
            else:
                weight = None
        # Calculate binary cross entropy loss
        loss = F.binary_cross_entropy_with_logits(logit_rsp, target_rsp, weight=weight, reduction='sum')
        # loss will be divided by (h * w)
        if self.size_average:
            loss /= (h * w)
        # loss will be divided by (n)
        if self.batch_average:
            loss /= n
        return loss
class ImageBasedCrossEntropy2D(BasicLossModule):
    """Image-weighted 2D cross entropy loss.

    Functionally the same contract as ``CrossEntropy2D`` but expressed as an
    explicit ``log_softmax`` followed by a negative log-likelihood loss.

    Params:
        ignore_index: int (default 255). Label value excluded from the loss.
        custom_weight: numpy array or list (default None). Fixed per-class
            weights, e.g. [0.2, 0.1, ..., 0.1]; when given, ``batch_weight``
            is ignored.
        batch_weight: bool (default True). Derive class weights from the batch.
        size_average: bool (default True). Divide the loss by (h * w).
        batch_average: bool (default True). Divide the loss by n.
    Forward:
        logit: raw (pre-softmax) scores, shape (n, c, h, w).
        target: class-index map, shape (n, h, w).
    """
    def __init__(self, *args, **kwargs):
        super(ImageBasedCrossEntropy2D, self).__init__(*args, **kwargs)

    def forward(self, logit, target):
        assert len(logit.shape) == 4 and len(target.shape) == 3
        batch, num_cls, height, width = logit.size()
        # Resolve the class-weight vector: fixed > batch-derived > none.
        if self.custom_weight is not None:
            cls_weight = self.custom_weight.to(logit.device)
        elif self.batch_weight:
            cls_weight = self.calculate_weights(target, num_cls).to(logit.device)
        else:
            cls_weight = None
        # log_softmax + NLL == cross entropy on the raw logits.
        loss = F.nll_loss(F.log_softmax(logit, dim=1), target.long(), cls_weight,
                          ignore_index=self.ignore_index, reduction='sum')
        if self.size_average:
            loss /= (height * width)
        if self.batch_average:
            loss /= batch
        return loss
class BoundariesRelaxation2D(BasicLossModule):
    """
    The boundaries relaxation loss, which details can be seen here:
        https://ieeexplore.ieee.org/abstract/document/8954327
    Params:
        ignore_index: int (default 255). Categories to be ignored of `target`
        custom_weight: numpy array or list (default None). The weight of each category. For example,
            [0.2, 0.1, ..., 0.1], if `custom_weight` is not None, `batch_weight` will be ignored
        batch_weight: bool (default True). If true, the whole batch is used to calculate weights
        size_average: bool (default True). Loss will be divided by (h * w)
        batch_average: bool (default True). Loss will be divided by (n)
        window_size: int, list or tuple (default 3). The slide window size of boundaries relaxation loss
        stride: int, list or tuple (default 1). The stride of the slide window
    forward:
        logit: 4-D torch.Tensor. The predict result without `sigmoid/softmax`, which shape is (n, c, h, w)
        target: 3-D torch.Tensor. The input target which shape is (n, h, w)
    Note that there's no need to use `softmax/sigmoid` for `logit` before calling this loss module.
    """
    def __init__(self, window_size=3, stride=1, *args, **kwargs):
        super(BoundariesRelaxation2D, self).__init__(*args, **kwargs)
        # Normalise `window_size` to a 2-tuple.
        # BUGFIX: `isinstance(x, list or tuple)` evaluates to `isinstance(x, list)`
        # (the `or` is resolved first), silently rejecting tuples even though the
        # docstring promises tuple support; a tuple of types must be used instead.
        if isinstance(window_size, int):
            window_size = (window_size, window_size)
        elif isinstance(window_size, (list, tuple)) and len(window_size) == 2:
            window_size = tuple(window_size)
        else:
            raise AttributeError('Expect type of `window_size`: int, 2-elem list or tuple')
        # Normalise `stride` to a 2-tuple (same fix as above).
        if isinstance(stride, int):
            stride = (stride, stride)
        elif isinstance(stride, (list, tuple)) and len(stride) == 2:
            stride = tuple(stride)
        else:
            raise AttributeError('Expect type of `stride`: int, 2-elem list or tuple')
        self.window_size = window_size
        self.stride = stride
        # Average pooling implements the sliding-window label relaxation.
        self.pool2d = nn.AvgPool2d(kernel_size=self.window_size, stride=self.stride)

    def forward(self, logit, target):
        # Get the size of `logit`
        n, c, h, w = logit.size()
        # Resolve the class-weight vector: fixed > batch-derived > none.
        if self.custom_weight is not None:
            weight = self.custom_weight.to(logit.device)
        else:
            if self.batch_weight:
                weight = self.calculate_weights(target, c).to(logit.device)
            else:
                weight = None
        # Get soft output of `logit`
        logit_soft = F.softmax(logit, dim=1)
        # Get `ignore_index`
        ignore_index = (target == self.ignore_index)
        # Convert 3-D `target` Tensor to 4-D `onehot` Tensor; ignored pixels are
        # temporarily clamped to a valid class so one_hot succeeds, then zeroed.
        target_clamps = target.clone()
        target_clamps[ignore_index] = c - 1
        target_onehot = F.one_hot(target_clamps.long(), c)  # n,h,w,c
        target_onehot[ignore_index] = 0  # n,h,w,c
        target_onehot_trans = target_onehot.transpose(2, 3).transpose(1, 2)  # n,c,h,w
        # Get the boundaries relaxation result of `logit` and `target`
        logit_br = self.pool2d(logit_soft)
        target_br = self.pool2d(target_onehot_trans.float())
        # Get loss; the epsilon keeps log() finite and the loss' lower bound is 0.
        loss = - target_br * (torch.log(logit_br + 1e-14) - torch.log(target_br + 1e-14))
        # Apply per-class weights if configured.
        if weight is not None:
            weight_matrix = weight.unsqueeze(0).unsqueeze(2).unsqueeze(3)  # 1,c,1,1
            loss = loss * weight_matrix
        # Get sum of loss
        loss = loss.sum()
        # loss will be divided by (h * w)
        if self.size_average:
            loss /= (h * w)
        # loss will be divided by (n)
        if self.batch_average:
            loss /= n
        return loss
class LabelSmoothCrossEntropy(BasicLossModule):
    """
    Label Smoothing Loss for classification.
    The hard one-hot target is blended with a uniform distribution:
    ``soft = (1 - epsilon) * onehot + epsilon / c``.
    Params:
        epsilon: float (default 0.1). Smoothing factor; 0 recovers plain cross entropy
        ignore_index: int (default 255). Categories to be ignored of `target`
        custom_weight: numpy array or list (default None). The weight of each category. For example,
            [0.2, 0.1, ..., 0.1], if `custom_weight` is not None, `batch_weight` will be ignored
        batch_weight: bool (default True). If true, the whole batch is used to calculate weights
        size_average: bool (default True). Loss will be divided by (h * w) (--unused for this version)
        batch_average: bool (default True). Loss will be divided by (n)
    Forward:
        logit: 2-D torch.Tensor. The predict result without `sigmoid/softmax`, which shape is (n, c)
        target: 1-D torch.Tensor. The input target which shape is (n)
    Note that there's no need to use `softmax/sigmoid` for `logit` before calling this loss module.
    """
    def __init__(self, epsilon=0.1, *args, **kwargs):
        super(LabelSmoothCrossEntropy, self).__init__(*args, **kwargs)
        self.epsilon = epsilon
        self.log_softmax = nn.LogSoftmax(dim=1)
    def forward(self, logit, target):
        assert len(logit.shape) == 2 and len(target.shape) == 1
        # Get the size of `logit`
        n, c = logit.size()
        # Masks of ignored / kept samples.
        ign_index = (target == self.ignore_index)
        pos_index = (target != self.ignore_index)
        pos_index = pos_index.data.cpu().numpy().astype(bool)
        # Per-sample weight rows; ignored rows stay all-zero so their loss
        # contribution is cancelled (when a weight tensor is used at all).
        weight = torch.Tensor(logit.size()).fill_(0).numpy()
        if self.custom_weight is not None:
            weight[pos_index, :] = self.custom_weight
            weight = torch.from_numpy(weight).to(logit.device)
        else:
            if self.batch_weight:
                weight[pos_index, :] = self.calculate_weights(target, c).data.cpu().numpy()
                weight = torch.from_numpy(weight).to(logit.device)
            else:
                # NOTE(review): with weight=None, ignored samples are NOT
                # masked -- they are remapped to class c-1 below and still
                # contribute to the loss; confirm this is intended.
                weight = None
        # Convert 1-D `target` Tensor to 2-D `onehot` Tensor; ignored entries
        # are clamped to a valid class (c-1) so one_hot does not fail.
        target_clamps = target.clone()
        target_clamps[ign_index] = c - 1
        target_onehot = F.one_hot(target_clamps.long(), c)  # n,c
        # Soft target and log logit
        target_soft = (1 - self.epsilon) * target_onehot + self.epsilon / c
        log_logit = self.log_softmax(logit)
        # Get sum of loss
        loss = -target_soft * log_logit
        if weight is not None:
            loss = loss * weight
        loss = loss.sum()
        # # loss will be divided by (h * w)
        # if self.size_average:
        #     loss /= (h * w)
        # loss will be divided by (n)
        if self.batch_average:
            loss /= n
        return loss
class LabelSmoothCrossEntropy2D(LabelSmoothCrossEntropy):
    """
    Label Smoothing Loss 2D for segmentation; the 4-D analogue of
    ``LabelSmoothCrossEntropy`` (same smoothing formula per pixel).
    Params:
        ignore_index: int (default 255). Categories to be ignored of `target`
        custom_weight: numpy array or list (default None). The weight of each category. For example,
            [0.2, 0.1, ..., 0.1], if `custom_weight` is not None, `batch_weight` will be ignored
        batch_weight: bool (default True). If true, the whole batch is used to calculate weights
        size_average: bool (default True). Loss will be divided by (h * w)
        batch_average: bool (default True). Loss will be divided by (n)
    Forward:
        logit: 4-D torch.Tensor. The predict result without `sigmoid/softmax`, which shape is (n, c, h, w)
        target: 3-D torch.Tensor. The input target which shape is (n, h, w)
    Note that there's no need to use `softmax/sigmoid` for `logit` before calling this loss module.
    """
    def forward(self, logit, target):
        assert len(logit.shape) == 4 and len(target.shape) == 3
        # Get the size of `logit`
        n, c, h, w = logit.size()
        # Masks of ignored / kept pixels.
        ign_index = (target == self.ignore_index)
        pos_index = (target != self.ignore_index)
        pos_index = pos_index.data.cpu().numpy().astype(bool)  # n,h,w
        # Per-pixel weight tensor, built channels-last so the boolean (n,h,w)
        # mask indexes whole class rows; ignored pixels keep an all-zero row.
        weight = torch.Tensor(logit.size()).fill_(0).numpy().transpose((0, 2, 3, 1))  # n,h,w,c
        if self.custom_weight is not None:
            weight[pos_index] = self.custom_weight
            weight = weight.transpose((0, 3, 1, 2))  # n,c,h,w
            weight = torch.from_numpy(weight).to(logit.device)
        else:
            if self.batch_weight:
                weight[pos_index] = self.calculate_weights(target, c).data.cpu().numpy()
                weight = weight.transpose((0, 3, 1, 2))  # n,c,h,w
                weight = torch.from_numpy(weight).to(logit.device)
            else:
                # NOTE(review): with weight=None, ignored pixels are NOT
                # masked -- they are remapped to class c-1 below and still
                # contribute to the loss; confirm this is intended.
                weight = None
        # Convert 3-D `target` Tensor to 4-D `onehot` Tensor; ignored entries
        # are clamped to a valid class (c-1) so one_hot does not fail.
        target_clamps = target.clone()
        target_clamps[ign_index] = c - 1
        target_onehot = F.one_hot(target_clamps.long(), c)  # n,h,w,c
        # Soft target and log logit
        target_soft = (1 - self.epsilon) * target_onehot + self.epsilon / c
        target_soft = target_soft.permute((0, 3, 1, 2))  # n,c,h,w
        log_logit = self.log_softmax(logit)
        # Get sum of loss
        loss = -target_soft * log_logit
        if weight is not None:
            loss = loss * weight
        loss = loss.sum()
        # loss will be divided by (h * w)
        if self.size_average:
            loss /= (h * w)
        # loss will be divided by (n)
        if self.batch_average:
            loss /= n
        return loss
if __name__ == '__main__':
    # Smoke tests for every loss module on random CUDA tensors.
    size = (256, 256)
    num_class = 9
    batch_size = 5
    # torch.manual_seed(1)
    torch.clear_autocast_cache()
    print('=' * 30 + ' CrossEntropy2D ' + '=' * 30)
    z = torch.randn(batch_size, num_class, *size, requires_grad=True).cuda()
    y = torch.randint(num_class, (batch_size, *size), dtype=torch.float).cuda()
    print('z.shape: {}, y.shape: {}'.format(z.shape, y.shape))
    l = CrossEntropy2D()(z, y)
    print(l.detach().cpu().numpy())
    print('=' * 30 + ' BinaryCrossEntropy2D ' + '=' * 30)
    # 3-D (n, h, w) variant.
    z = torch.randn(batch_size, *size, requires_grad=True).cuda()
    y = torch.randint(2, (batch_size, *size), dtype=torch.float).cuda()
    print('z.shape: {}, y.shape: {}'.format(z.shape, y.shape))
    l = BinaryCrossEntropy2D()(z, y)
    print(l.detach().cpu().numpy())
    print()
    # 4-D (n, c, h, w) variant.
    z = torch.randn(batch_size, num_class, *size, requires_grad=True).cuda()
    y = torch.randint(2, (batch_size, num_class, *size), dtype=torch.float).cuda()
    print('z.shape: {}, y.shape: {}'.format(z.shape, y.shape))
    l = BinaryCrossEntropy2D()(z, y)
    print(l.detach().cpu().numpy())
    print('=' * 30 + ' ImageBasedCrossEntropy2D ' + '=' * 30)
    z = torch.randn(batch_size, num_class, *size, requires_grad=True).cuda()
    y = torch.randint(num_class, (batch_size, *size), dtype=torch.float).cuda()
    print('z.shape: {}, y.shape: {}'.format(z.shape, y.shape))
    l = ImageBasedCrossEntropy2D(num_class, batch_weight=True)(z, y)
    print(l.detach().cpu().numpy())
    print('=' * 30 + ' BoundariesRelaxation2D ' + '=' * 30)
    z = torch.randn(batch_size, num_class, *size, requires_grad=True).cuda()
    y = torch.randint(num_class, (batch_size, *size), dtype=torch.float).cuda()
    print('z.shape: {}, y.shape: {}'.format(z.shape, y.shape))
    l = BoundariesRelaxation2D(window_size=10, custom_weight=np.arange(num_class) + 1)(z, y)
    print(l.detach().cpu().numpy())
    # BUGFIX: this banner previously said 'BoundariesRelaxation2D' (copy-paste),
    # but the section below exercises LabelSmoothCrossEntropy.
    print('=' * 30 + ' LabelSmoothCrossEntropy ' + '=' * 30)
    z = torch.randn(batch_size, num_class, requires_grad=True).cuda()
    y = torch.randint(num_class, (batch_size,), dtype=torch.float).cuda()
    y[0] = 255  # exercise the ignore_index path
    print('z.shape: {}, y.shape: {}'.format(z.shape, y.shape))
    l = LabelSmoothCrossEntropy(epsilon=0)(z, y)
print(l.detach().cpu().numpy()) | [
"torch.randint",
"torch.nn.LogSoftmax",
"torch.histc",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.randn",
"torch.nn.functional.softmax",
"numpy.arange",
"numpy.array",
"torch.clear_autocast_cache",
"torch.nn.functional.log_softmax",
"torch.nn.AvgPool2d",
"torch.log",
"tor... | [((20210, 20238), 'torch.clear_autocast_cache', 'torch.clear_autocast_cache', ([], {}), '()\n', (20236, 20238), False, 'import torch\n'), ((2127, 2192), 'torch.histc', 'torch.histc', (['target'], {'bins': 'num_classes', 'min': '(0)', 'max': '(num_classes - 1)'}), '(target, bins=num_classes, min=0, max=num_classes - 1)\n', (2138, 2192), False, 'import torch\n'), ((8122, 8215), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['logit_rsp', 'target_rsp'], {'weight': 'weight', 'reduction': '"""sum"""'}), "(logit_rsp, target_rsp, weight=weight,\n reduction='sum')\n", (8156, 8215), True, 'import torch.nn.functional as F\n'), ((12530, 12592), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': 'self.window_size', 'stride': 'self.stride'}), '(kernel_size=self.window_size, stride=self.stride)\n', (12542, 12592), True, 'import torch.nn as nn\n'), ((13072, 13095), 'torch.nn.functional.softmax', 'F.softmax', (['logit'], {'dim': '(1)'}), '(logit, dim=1)\n', (13081, 13095), True, 'import torch.nn.functional as F\n'), ((15524, 15544), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (15537, 15544), True, 'import torch.nn as nn\n'), ((10050, 10077), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logit'], {'dim': '(1)'}), '(logit, dim=1)\n', (10063, 10077), True, 'import torch.nn.functional as F\n'), ((20303, 20364), 'torch.randn', 'torch.randn', (['batch_size', 'num_class', '*size'], {'requires_grad': '(True)'}), '(batch_size, num_class, *size, requires_grad=True)\n', (20314, 20364), False, 'import torch\n'), ((20381, 20445), 'torch.randint', 'torch.randint', (['num_class', '(batch_size, *size)'], {'dtype': 'torch.float'}), '(num_class, (batch_size, *size), dtype=torch.float)\n', (20394, 20445), False, 'import torch\n'), ((20656, 20706), 'torch.randn', 'torch.randn', (['batch_size', '*size'], {'requires_grad': '(True)'}), '(batch_size, *size, requires_grad=True)\n', (20667, 20706), 
False, 'import torch\n'), ((20723, 20779), 'torch.randint', 'torch.randint', (['(2)', '(batch_size, *size)'], {'dtype': 'torch.float'}), '(2, (batch_size, *size), dtype=torch.float)\n', (20736, 20779), False, 'import torch\n'), ((20952, 21013), 'torch.randn', 'torch.randn', (['batch_size', 'num_class', '*size'], {'requires_grad': '(True)'}), '(batch_size, num_class, *size, requires_grad=True)\n', (20963, 21013), False, 'import torch\n'), ((21030, 21097), 'torch.randint', 'torch.randint', (['(2)', '(batch_size, num_class, *size)'], {'dtype': 'torch.float'}), '(2, (batch_size, num_class, *size), dtype=torch.float)\n', (21043, 21097), False, 'import torch\n'), ((21318, 21379), 'torch.randn', 'torch.randn', (['batch_size', 'num_class', '*size'], {'requires_grad': '(True)'}), '(batch_size, num_class, *size, requires_grad=True)\n', (21329, 21379), False, 'import torch\n'), ((21396, 21460), 'torch.randint', 'torch.randint', (['num_class', '(batch_size, *size)'], {'dtype': 'torch.float'}), '(num_class, (batch_size, *size), dtype=torch.float)\n', (21409, 21460), False, 'import torch\n'), ((21711, 21772), 'torch.randn', 'torch.randn', (['batch_size', 'num_class', '*size'], {'requires_grad': '(True)'}), '(batch_size, num_class, *size, requires_grad=True)\n', (21722, 21772), False, 'import torch\n'), ((21789, 21853), 'torch.randint', 'torch.randint', (['num_class', '(batch_size, *size)'], {'dtype': 'torch.float'}), '(num_class, (batch_size, *size), dtype=torch.float)\n', (21802, 21853), False, 'import torch\n'), ((22128, 22182), 'torch.randn', 'torch.randn', (['batch_size', 'num_class'], {'requires_grad': '(True)'}), '(batch_size, num_class, requires_grad=True)\n', (22139, 22182), False, 'import torch\n'), ((22199, 22257), 'torch.randint', 'torch.randint', (['num_class', '(batch_size,)'], {'dtype': 'torch.float'}), '(num_class, (batch_size,), dtype=torch.float)\n', (22212, 22257), False, 'import torch\n'), ((13813, 13840), 'torch.log', 'torch.log', (['(logit_br + 1e-14)'], 
{}), '(logit_br + 1e-14)\n', (13822, 13840), False, 'import torch\n'), ((13843, 13871), 'torch.log', 'torch.log', (['(target_br + 1e-14)'], {}), '(target_br + 1e-14)\n', (13852, 13871), False, 'import torch\n'), ((16137, 16161), 'torch.from_numpy', 'torch.from_numpy', (['weight'], {}), '(weight)\n', (16153, 16161), False, 'import torch\n'), ((18880, 18904), 'torch.from_numpy', 'torch.from_numpy', (['weight'], {}), '(weight)\n', (18896, 18904), False, 'import torch\n'), ((21987, 22007), 'numpy.arange', 'np.arange', (['num_class'], {}), '(num_class)\n', (21996, 22007), True, 'import numpy as np\n'), ((1487, 1518), 'torch.from_numpy', 'torch.from_numpy', (['custom_weight'], {}), '(custom_weight)\n', (1503, 1518), False, 'import torch\n'), ((16348, 16372), 'torch.from_numpy', 'torch.from_numpy', (['weight'], {}), '(weight)\n', (16364, 16372), False, 'import torch\n'), ((19156, 19180), 'torch.from_numpy', 'torch.from_numpy', (['weight'], {}), '(weight)\n', (19172, 19180), False, 'import torch\n'), ((1384, 1425), 'numpy.array', 'np.array', (['custom_weight'], {'dtype': 'np.float32'}), '(custom_weight, dtype=np.float32)\n', (1392, 1425), True, 'import numpy as np\n')] |
"""
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights. Also auto download COCO dataset
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet --download=True
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
    # Run COCO evaluation on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import json
import os
import shutil
import sys
import time
import urllib.request
import zipfile

import numpy as np
# import imgaug  # https://github.com/aleju/imgaug (pip3 install imgaug)
# NOTE(review): this module also calls `skimage.io.imread` and
# `skimage.draw.polygon` but never imports skimage; add the scikit-image
# imports (`import skimage.io, skimage.draw`) when the dependency is available.
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2014"
############################################################
# Configurations
############################################################
class MyConfig(Config):
    """
    Configuration for training on our own dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "mask"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 1
    # Number of classes (including background)
    NUM_CLASSES = 1 + 7  # Background + my
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100
    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9
    # Feature-extraction backbone and input-resizing overrides.
    BACKBONE = "resnet101"
    IMAGE_RESIZE_MODE = "square"
    IMAGE_MIN_DIM = 800
    IMAGE_MAX_DIM = 1024
    IMAGE_MIN_SCALE = 0
    # Alternative lighter-backbone / anchor settings kept for experimentation.
    # BACKBONE = "resnet50"
    # BACKBONE_STRIDES = [4, 8, 16, 32, 64]
    # # BACKBONE_STRIDES = [2, 4, 8, 16, 32]
    # RPN_ANCHOR_SCALES = (10, 32, 64, 128, 256)
    # RPN_ANCHOR_STRIDE = 2
    # RPN_NMS_THRESHOLD = 0.9
    # RPN_TRAIN_ANCHORS_PER_IMAGE = 512
    # TRAIN_ROIS_PER_IMAGE = 512
############################################################
# Dataset
############################################################
class MyDataset(utils.Dataset):
def print_size(self, poly):
for p in poly:
a = np.array(p['all_points_y'])
height = a.max() - a.min()
a = np.array(p['all_points_x'])
width = a.max() - a.min()
self.areas.append(height * width)
#if height * width < 4096:
# print(width, height)
def load_my(self, dataset_dir, subset, class_dict):
"""Load a subset of the My dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
self.areas = []
# Add classes. We have only one class to add.
for (k, v) in class_dict.items():
self.add_class("my", v, k)
# Train or validation dataset?
assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
# Load annotations
# VGG Image Annotator (up to version 1.6) saves each image in the form:
# { 'filename': '28503151_5b5b7ec140_b.jpg',
# 'regions': {
# '0': {
# 'region_attributes': {},
# 'shape_attributes': {
# 'all_points_x': [...],
# 'all_points_y': [...],
# 'name': 'polygon'}},
# ... more regions ...
# },
# 'size': 100202
# }
# We mostly care about the x and y coordinates of each region
# Note: In VIA 2.0, regions was changed from a dict to a list.
annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
annotations = list(annotations.values()) # don't need the dict keys
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
annotations = [a for a in annotations if a['regions']]
# Add images
for a in annotations:
# Get the x, y coordinaets of points of the polygons that make up
# the outline of each object instance. These are stores in the
# shape_attributes (see json format above)
# The if condition is needed to support VIA versions 1.x and 2.x.
# print(a['regions'])
# print(a['filename'])
if type(a['regions']) is dict:
polygons = [r['shape_attributes'] for r in a['regions'].values()]
else:
if a['regions']:
class_ids = []
polygons = []
for r in a['regions']:
polygons.append(r['shape_attributes'])
class_type = r['region_attributes']['type']
class_ids.append(class_dict[class_type])
self.print_size(polygons)
# print(class_ids)
# load_mask() needs the image size to convert polygons to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
# the image. This is only managable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"my",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons,
class_ids=class_ids)
self.areas.sort()
print(np.unique(np.round(np.sqrt(self.areas))))
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a my dataset image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "my":
return super(self.__class__, self).load_mask(image_id)
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
info = self.image_info[image_id]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
class_ids = np.array(info['class_ids'])
# Return mask, and array of class IDs of each instance. Since we have
# one class ID only, we return an array of 1s
return mask.astype(np.bool), class_ids.astype(np.int32)
    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "my":
            return info["path"]
        # NOTE(review): the branch below delegates to the parent class but does
        # not `return` its result, so callers get None for non-"my" images --
        # looks like a missing `return`; confirm against callers before fixing.
        else:
super(self.__class__, self).image_reference(image_id) | [
"sys.path.append",
"os.path.abspath",
"numpy.array",
"os.path.join",
"numpy.sqrt"
] | [((1256, 1281), 'os.path.abspath', 'os.path.abspath', (['"""../../"""'], {}), "('../../')\n", (1271, 1281), False, 'import os\n'), ((1302, 1327), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (1317, 1327), False, 'import sys\n'), ((1493, 1536), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (1505, 1536), False, 'import os\n'), ((1664, 1694), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (1676, 1694), False, 'import os\n'), ((3881, 3914), 'os.path.join', 'os.path.join', (['dataset_dir', 'subset'], {}), '(dataset_dir, subset)\n', (3893, 3914), False, 'import os\n'), ((7818, 7845), 'numpy.array', 'np.array', (["info['class_ids']"], {}), "(info['class_ids'])\n", (7826, 7845), True, 'import numpy as np\n'), ((3136, 3163), 'numpy.array', 'np.array', (["p['all_points_y']"], {}), "(p['all_points_y'])\n", (3144, 3163), True, 'import numpy as np\n'), ((3219, 3246), 'numpy.array', 'np.array', (["p['all_points_x']"], {}), "(p['all_points_x'])\n", (3227, 3246), True, 'import numpy as np\n'), ((4619, 4668), 'os.path.join', 'os.path.join', (['dataset_dir', '"""via_region_data.json"""'], {}), "(dataset_dir, 'via_region_data.json')\n", (4631, 4668), False, 'import os\n'), ((6225, 6265), 'os.path.join', 'os.path.join', (['dataset_dir', "a['filename']"], {}), "(dataset_dir, a['filename'])\n", (6237, 6265), False, 'import os\n'), ((6769, 6788), 'numpy.sqrt', 'np.sqrt', (['self.areas'], {}), '(self.areas)\n', (6776, 6788), True, 'import numpy as np\n')] |
import numpy as np
from ndsimulator.data import AllData
from ndsimulator.constant import kB
class PEBias(AllData):
    """Potential-energy based bias.

    Penalizes every configuration whose potential energy is below the
    threshold ``thred`` by adding the bias energy ``V = thred - pe`` (and the
    corresponding bias force), pushing the walker out of low-energy regions.
    """

    def __init__(
        self,
        thred=0.0,
        run=None,
    ):
        # Potential-energy threshold below which the bias becomes active.
        self.thred = thred
        super(PEBias, self).__init__(run)

    def initialize(self, pointer):
        """Attach the shared run data and reset the stored bias energy."""
        AllData.__init__(self, run=pointer)
        # Bias energy last evaluated at the current atomic positions.
        self.VR = 0

    def update(self, step, time):
        """No per-step state to update for this bias."""
        pass

    def compute(self, x0=None, col0=None):
        """Return (bias energy, bias force) at ``x0`` or the current state.

        When ``x0`` is None the cached atomic pe/forces are used and the
        resulting bias energy is also stored in ``self.VR``.  ``col0`` is
        unused by this bias.
        """
        # TO DO check the force form
        if x0 is None:
            x = self.atoms.positions
            pe = self.atoms.pe
            f = self.atoms.forces
        else:
            x = x0
            pe, f = self.potential.compute(x)
        if pe < self.thred:
            # Bias force = -dV/dx with V = thred - pe, i.e. the negative of
            # the physical force returned by the potential.
            f = -np.array(f)
            V = self.thred - pe
        else:
            f = np.zeros(self.ndim)
            V = 0
        if x0 is None:
            self.VR = V
        return V, f

    def force(self, x0=None, col0=None):
        """Return only the bias force (same convention as ``compute``)."""
        # TO DO check the force form
        if x0 is None:
            # NOTE(review): `compute`/`energy` read `self.atoms.positions`
            # while this method reads `self.atoms.x` -- confirm which
            # attribute is intended (the value is unused in this branch).
            x = self.atoms.x
            pe = self.atoms.pe
            f = self.atoms.forces
        else:
            x = x0
            pe, f = self.potential.compute(x)
        if pe < self.thred:
            return -np.array(f)
        else:
            return np.zeros(self.ndim)

    def energy(self, x0=None, col0=None):
        """Return the bias energy at ``x0`` or the current state (caches VR)."""
        if x0 is None:
            x = self.atoms.positions
            pe = self.atoms.pe
        else:
            x = x0
            pe, f = self.potential.compute(x)
        if pe < self.thred:
            V = self.thred - pe
        else:
            V = 0
        if x0 is None:
            self.VR = V
        return V

    def dump_data(self):
        """Nothing extra to append to the trajectory/log line."""
        line = ""
        return line

    def projection(self, X, Y):
        """No projection onto collective variables for this bias."""
        pass
| [
"numpy.array",
"ndsimulator.data.AllData.__init__",
"numpy.zeros"
] | [((481, 516), 'ndsimulator.data.AllData.__init__', 'AllData.__init__', (['self'], {'run': 'pointer'}), '(self, run=pointer)\n', (497, 516), False, 'from ndsimulator.data import AllData\n'), ((992, 1011), 'numpy.zeros', 'np.zeros', (['self.ndim'], {}), '(self.ndim)\n', (1000, 1011), True, 'import numpy as np\n'), ((1468, 1487), 'numpy.zeros', 'np.zeros', (['self.ndim'], {}), '(self.ndim)\n', (1476, 1487), True, 'import numpy as np\n'), ((918, 929), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (926, 929), True, 'import numpy as np\n'), ((1423, 1434), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (1431, 1434), True, 'import numpy as np\n')] |
from __future__ import annotations
import numpy as np
from edutorch.typing import NPArray
from .module import Module
class SpatialGroupNorm(Module):
    """Spatial group normalization over (N, C, H, W) inputs with G channel groups."""

    def __init__(self, num_features: int, G: int, eps: float = 1e-5) -> None:
        super().__init__()
        self.eps = eps
        self.G = G
        self.gamma = np.zeros(num_features)
        self.beta = np.zeros(num_features)
        self.set_parameters("gamma", "beta")

    def forward(self, x: NPArray) -> NPArray:
        """Normalize each of the G channel groups independently, then scale/shift.

        Inputs:
            x: data of shape (N, C, H, W); C must be divisible by G.
        Returns:
            Output of shape (N, C, H, W).
        """
        n, c, h, w = x.shape
        # Keep broadcastable (1, C, 1, 1) views of the per-channel parameters.
        self.gamma = self.gamma.reshape((1, c, 1, 1))
        self.beta = self.beta.reshape((1, c, 1, 1))
        # One column per (sample, group): statistics are taken within a group.
        grouped = x.reshape(n * self.G, -1).T
        mu = grouped.mean(axis=0)
        sigma = np.sqrt(grouped.var(axis=0) + self.eps)
        x_hat = ((grouped - mu) / sigma).T.reshape(n, c, h, w)
        self.cache = (x_hat, sigma)
        return self.gamma * x_hat + self.beta

    def backward(self, dout: NPArray) -> tuple[NPArray, ...]:
        """Backward pass for spatial group normalization.

        Inputs:
            dout: upstream gradient of shape (N, C, H, W).
        Returns:
            (dx, dgamma, dbeta); dx matches the input shape, while the
            parameter gradients come out with shape (1, C, 1, 1).
        """
        x_hat, sigma = self.cache
        n, c, h, w = dout.shape
        dgamma = (dout * x_hat).sum(axis=(0, 2, 3), keepdims=True)
        dbeta = dout.sum(axis=(0, 2, 3), keepdims=True)
        # Fold back into the per-group column layout used in the forward pass.
        dxhat = (dout * self.gamma).reshape(n * self.G, -1).T
        xh = x_hat.reshape(n * self.G, -1).T
        dx = (dxhat - dxhat.mean(axis=0) - xh * (dxhat * xh).mean(axis=0)) / sigma
        return dx.T.reshape(n, c, h, w), dgamma, dbeta
| [
"numpy.sum",
"numpy.zeros",
"numpy.mean",
"numpy.var",
"numpy.sqrt"
] | [((323, 345), 'numpy.zeros', 'np.zeros', (['num_features'], {}), '(num_features)\n', (331, 345), True, 'import numpy as np\n'), ((366, 388), 'numpy.zeros', 'np.zeros', (['num_features'], {}), '(num_features)\n', (374, 388), True, 'import numpy as np\n'), ((1565, 1583), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1572, 1583), True, 'import numpy as np\n'), ((1605, 1622), 'numpy.var', 'np.var', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (1611, 1622), True, 'import numpy as np\n'), ((1635, 1665), 'numpy.sqrt', 'np.sqrt', (['(sample_var + self.eps)'], {}), '(sample_var + self.eps)\n', (1642, 1665), True, 'import numpy as np\n'), ((2484, 2535), 'numpy.sum', 'np.sum', (['(dout * x_hat)'], {'axis': '(0, 2, 3)', 'keepdims': '(True)'}), '(dout * x_hat, axis=(0, 2, 3), keepdims=True)\n', (2490, 2535), True, 'import numpy as np\n'), ((2552, 2595), 'numpy.sum', 'np.sum', (['dout'], {'axis': '(0, 2, 3)', 'keepdims': '(True)'}), '(dout, axis=(0, 2, 3), keepdims=True)\n', (2558, 2595), True, 'import numpy as np\n'), ((2731, 2754), 'numpy.mean', 'np.mean', (['dx_hat'], {'axis': '(0)'}), '(dx_hat, axis=0)\n', (2738, 2754), True, 'import numpy as np\n'), ((2765, 2796), 'numpy.mean', 'np.mean', (['(dx_hat * x_hat)'], {'axis': '(0)'}), '(dx_hat * x_hat, axis=0)\n', (2772, 2796), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Coordinator handles interaction between internal components of AndroidEnv."""
import copy
import socket
import time
from typing import Any, Dict, Optional, Tuple
from absl import logging
from android_env.components import action_type as action_type_lib
from android_env.components import base_simulator
from android_env.components import errors
from android_env.components import specs
from android_env.components import task_manager as task_manager_lib
import dm_env
import numpy as np
class Coordinator():
  """Handles interaction between internal components of AndroidEnv."""

  def __init__(
      self,
      simulator: base_simulator.BaseSimulator,
      task_manager: task_manager_lib.TaskManager,
      step_timeout_sec: int = 10,
      max_steps_per_sec: float = 5.0,
      periodic_restart_time_min: float = 0.0,
      force_simulator_launch: bool = True,
  ):
    """Handles communication between AndroidEnv and its components.

    Args:
      simulator: A BaseSimulator instance.
      task_manager: The TaskManager, responsible for coordinating RL tasks.
      step_timeout_sec: Timeout in seconds between steps. If step is not called
        within that time, the episode will reset at the next step. Set to 0 to
        disable.
      max_steps_per_sec: Maximum steps per second. If the simulator is
        faster, the Coordinator will wait before returning an observation.
      periodic_restart_time_min: Time between periodic restarts in minutes.
        If > 0.0, will trigger a simulator restart at the end of the next
        episode once the time has been reached.
      force_simulator_launch: Forces the simulator to relaunch even if it is
        already launched.
    """
    self._simulator = simulator
    self._task_manager = task_manager
    self._step_timeout_sec = step_timeout_sec
    self._max_steps_per_sec = max_steps_per_sec
    self._periodic_restart_time_min = periodic_restart_time_min
    self._force_simulator_launch = force_simulator_launch
    # Logging settings.  Restart counters are keyed by the failure point that
    # triggered the restart; keys starting with 'episode' are zeroed at reset.
    self._log_dict = {
        'total_steps': 0,
        'episode_steps': 0,
        'restart_count': 0,
        'restart_count_periodic': 0,
        'restart_count_setup_steps': 0,
        'restart_count_reset_steps': 0,
        'restart_count_simulator_launch': 0,
        'restart_count_simulator_reset': 0,
        'restart_count_execute_action': 0,
        'restart_count_fetch_observation': 0,
        'reset_count_step_timeout': 0,
    }
    # Initialize counters.
    self._should_restart = False  # set when a component error requires relaunch
    self._latest_observation_time: Optional[float] = None
    self._simulator_start_time: Optional[float] = None
    # Launch the simulator and set up the task immediately.
    self._restart_simulator()

  def action_spec(self) -> Dict[str, dm_env.specs.Array]:
    return specs.base_action_spec()

  def observation_spec(self) -> Dict[str, dm_env.specs.Array]:
    # Observation shape follows the simulator's current screen size.
    screen_dims = self._simulator.screen_dimensions()
    return specs.base_observation_spec(
        height=screen_dims[0], width=screen_dims[1])

  def task_extras_spec(self) -> Dict[str, dm_env.specs.Array]:
    return specs.base_task_extras_spec(task=self._task_manager.task())

  def reset_environment_state(self):
    """Resets the state of the simulation for a new RL episode.

    This involves resetting relevant counters and performing reset steps
    specific to the running task (e.g. restarting an application). A lift
    action is also performed to ensure that sequential touches are not
    interconnected between separate RL episodes.
    """
    # Restart the simulation if necessary.
    if self._should_restart or self._should_periodic_restart():
      self._restart_simulator()
    # Reset counters.
    self._latest_observation_time = None
    for key in self._log_dict:
      if key.startswith('episode'):
        self._log_dict[key] = 0.0
    # Execute a lift action before resetting the task.
    self._send_action_to_simulator({
        'action_type': np.array(action_type_lib.ActionType.LIFT),
        'touch_position': np.array([0, 0]),
    })
    # Reset the task.
    try:
      self._task_manager.reset_task()
      self._simulator.update_device_orientation()
    except errors.StepCommandError:
      # Defer the relaunch to the next reset rather than failing here.
      logging.exception('Failed to reset the task. Restarting simulator.')
      self._log_dict['restart_count_simulator_reset'] += 1
      self._should_restart = True

  def _should_periodic_restart(self) -> bool:
    """Checks if it is time to restart the simulator.

    If a periodic restart time was specified, the Coordinator will re-launch
    the simulator at regular time intervals. This helps to make sure that the
    simulator is not is a stale state even if the environment has been running
    for a significant amount of time.

    Returns:
      Boolean indicating if it is time to restart the simulator.
    """
    if self._periodic_restart_time_min and self._simulator_start_time:
      sim_alive_time = (time.time() - self._simulator_start_time) / 60.0
      logging.info('Simulator has been running for %f mins', sim_alive_time)
      if sim_alive_time > self._periodic_restart_time_min:
        logging.info('Maximum alive time reached. Restarting simulator.')
        self._log_dict['restart_count_periodic'] += 1
        return True
    return False

  def _restart_simulator(self, max_retries: int = 3):
    """Restarts the simulation.

    Closes and re-launches the system, restarting the simulator process and
    reinitializing the task in the newly started simulator.

    Args:
      max_retries: Number of times to attempt a restart before raising an error.
    """
    # Reset counters.
    self._should_restart = False
    # Attempt to restart the system a given number of times.
    num_tries = 1
    while True:
      if num_tries > max_retries:
        logging.error('Maximum number of restarts reached.')
        raise errors.TooManyRestartsError
      logging.info('Simulator launch attempt %d of %d', num_tries, max_retries)
      # Launch the simulator (will restart if already launched).
      try:
        if self._force_simulator_launch or not self._simulator.is_launched():
          self._task_manager.pause_task()
          self._simulator.launch()
          self._simulator_start_time = time.time()
        adb_controller = self._simulator.create_adb_controller()
      except errors.AdbControllerError:
        logging.error('Error launching the simulator.')
        self._log_dict['restart_count_simulator_launch'] += 1
        num_tries += 1
        continue
      # Start the task.
      try:
        self._task_manager.setup_task(adb_controller=adb_controller)
      except errors.StepCommandError:
        logging.error('Failed to set up the task. Restarting simulator.')
        self._log_dict['restart_count_setup_steps'] += 1
        num_tries += 1
        continue
      # Restart was successful.
      break

  def execute_action(
      self,
      action: Optional[Dict[str, np.ndarray]],
  ) -> Tuple[Optional[Dict[str, np.ndarray]], float, Dict[str, Any], bool]:
    """Executes the selected action and returns transition info.

    Args:
      action: Selected action to perform on the simulated Android device.

    Returns:
      observation: Pixel observations as displayed on the screen.
      reward: Total reward collected since the last call.
      extras: Task extras observed since the last call.
      episode_end: Boolean indicating if the RL episode should be terminated.
    """
    # Increment counters.
    self._task_manager.increment_steps()
    if action is not None:
      self._log_dict['total_steps'] += 1
      self._log_dict['episode_steps'] += 1
    # If a restart is necessary, end the episode.
    if self._should_restart or self._check_timeout():
      return None, 0.0, {}, True
    # If the action is a TOUCH or LIFT, send it to the simulator.
    # REPEAT actions deliberately re-observe without touching the screen.
    if (action is not None and
        action['action_type'].item() != action_type_lib.ActionType.REPEAT):
      self._send_action_to_simulator(action)
    # Sleep to maintain a steady interaction rate.
    self._wait_for_next_frame()
    # Read necessary transition information and return it to the agent.
    try:
      self._latest_observation_time = time.time()
      observation = self._simulator.get_observation()
      reward = self._task_manager.get_current_reward()
      task_extras = self._task_manager.get_current_extras()
      episode_end = self._task_manager.check_if_episode_ended()
      return observation, reward, task_extras, episode_end
    except (errors.ReadObservationError, socket.error):
      logging.exception('Unable to fetch observation. Restarting simulator.')
      self._log_dict['restart_count_fetch_observation'] += 1
      self._should_restart = True
      return None, 0.0, {}, True

  def _send_action_to_simulator(self, action: Dict[str, np.ndarray]) -> None:
    """Sends the selected action to the simulator.

    The simulator will interpret the action as a touchscreen event and perform
    it accordingly. The effect this action triggers in the Android OS will be
    determined by the currently running application.

    Args:
      action: action which will get interpreted as a touchscreen event.
    """
    try:
      self._simulator.send_action(action)
    except (socket.error, errors.SendActionError):
      logging.exception('Unable to execute action. Restarting simulator.')
      self._log_dict['restart_count_execute_action'] += 1
      self._should_restart = True

  def _check_timeout(self) -> bool:
    """Checks if timeout between steps have exceeded.

    If too much time has passed since the last step was performed, it is assumed
    that the simulation is in a bad state, so the Coordinator will re-launch
    the simulator to make sure interaction proceeds from a clean state.

    Returns:
      Boolean indicating if the step timeout limit has been reached.
    """
    if self._step_timeout_sec and self._latest_observation_time:
      time_since_last_obs = self._get_time_since_last_observation()
      if time_since_last_obs > self._step_timeout_sec:
        self._should_restart = True
        return True
    return False

  def _wait_for_next_frame(self) -> None:
    """Pauses the environment so that the interaction is around 1/FPS."""
    # On the first step _get_time_since_last_observation() returns np.inf,
    # so time_to_wait is negative and no sleep occurs.
    time_since_observation = self._get_time_since_last_observation()
    time_to_wait = 1. / self._max_steps_per_sec - time_since_observation
    if time_to_wait > 0.0:
      time.sleep(time_to_wait)

  def _get_time_since_last_observation(self) -> float:
    """Computes time passed since the last observation was fetched."""
    if self._latest_observation_time is not None:
      return time.time() - self._latest_observation_time
    else:
      return np.inf

  def get_logs(self) -> Dict[str, Any]:
    """Returns internal counter values."""
    # Copy so callers cannot mutate the Coordinator's own counters.
    log_dict = copy.deepcopy(self._log_dict)
    log_dict.update(self._task_manager.log_dict())
    return log_dict

  def close(self):
    """Cleans up the state of this Coordinator."""
    # hasattr guards: __init__ may have raised before these were assigned.
    if hasattr(self, '_task_manager'):
      self._task_manager.close()
    if hasattr(self, '_simulator'):
      self._simulator.close()
| [
"copy.deepcopy",
"absl.logging.exception",
"android_env.components.specs.base_action_spec",
"time.time",
"absl.logging.info",
"time.sleep",
"android_env.components.specs.base_observation_spec",
"numpy.array",
"absl.logging.error"
] | [((3314, 3338), 'android_env.components.specs.base_action_spec', 'specs.base_action_spec', ([], {}), '()\n', (3336, 3338), False, 'from android_env.components import specs\n'), ((3468, 3540), 'android_env.components.specs.base_observation_spec', 'specs.base_observation_spec', ([], {'height': 'screen_dims[0]', 'width': 'screen_dims[1]'}), '(height=screen_dims[0], width=screen_dims[1])\n', (3495, 3540), False, 'from android_env.components import specs\n'), ((11362, 11391), 'copy.deepcopy', 'copy.deepcopy', (['self._log_dict'], {}), '(self._log_dict)\n', (11375, 11391), False, 'import copy\n'), ((5515, 5585), 'absl.logging.info', 'logging.info', (['"""Simulator has been running for %f mins"""', 'sim_alive_time'], {}), "('Simulator has been running for %f mins', sim_alive_time)\n", (5527, 5585), False, 'from absl import logging\n'), ((6429, 6502), 'absl.logging.info', 'logging.info', (['"""Simulator launch attempt %d of %d"""', 'num_tries', 'max_retries'], {}), "('Simulator launch attempt %d of %d', num_tries, max_retries)\n", (6441, 6502), False, 'from absl import logging\n'), ((8736, 8747), 'time.time', 'time.time', ([], {}), '()\n', (8745, 8747), False, 'import time\n'), ((10972, 10996), 'time.sleep', 'time.sleep', (['time_to_wait'], {}), '(time_to_wait)\n', (10982, 10996), False, 'import time\n'), ((4485, 4526), 'numpy.array', 'np.array', (['action_type_lib.ActionType.LIFT'], {}), '(action_type_lib.ActionType.LIFT)\n', (4493, 4526), True, 'import numpy as np\n'), ((4554, 4570), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (4562, 4570), True, 'import numpy as np\n'), ((4741, 4809), 'absl.logging.exception', 'logging.exception', (['"""Failed to reset the task. Restarting simulator."""'], {}), "('Failed to reset the task. Restarting simulator.')\n", (4758, 4809), False, 'from absl import logging\n'), ((5653, 5718), 'absl.logging.info', 'logging.info', (['"""Maximum alive time reached. Restarting simulator."""'], {}), "('Maximum alive time reached. 
Restarting simulator.')\n", (5665, 5718), False, 'from absl import logging\n'), ((6328, 6380), 'absl.logging.error', 'logging.error', (['"""Maximum number of restarts reached."""'], {}), "('Maximum number of restarts reached.')\n", (6341, 6380), False, 'from absl import logging\n'), ((9102, 9173), 'absl.logging.exception', 'logging.exception', (['"""Unable to fetch observation. Restarting simulator."""'], {}), "('Unable to fetch observation. Restarting simulator.')\n", (9119, 9173), False, 'from absl import logging\n'), ((9843, 9911), 'absl.logging.exception', 'logging.exception', (['"""Unable to execute action. Restarting simulator."""'], {}), "('Unable to execute action. Restarting simulator.')\n", (9860, 9911), False, 'from absl import logging\n'), ((11188, 11199), 'time.time', 'time.time', ([], {}), '()\n', (11197, 11199), False, 'import time\n'), ((5460, 5471), 'time.time', 'time.time', ([], {}), '()\n', (5469, 5471), False, 'import time\n'), ((6774, 6785), 'time.time', 'time.time', ([], {}), '()\n', (6783, 6785), False, 'import time\n'), ((6899, 6946), 'absl.logging.error', 'logging.error', (['"""Error launching the simulator."""'], {}), "('Error launching the simulator.')\n", (6912, 6946), False, 'from absl import logging\n'), ((7200, 7265), 'absl.logging.error', 'logging.error', (['"""Failed to set up the task. Restarting simulator."""'], {}), "('Failed to set up the task. Restarting simulator.')\n", (7213, 7265), False, 'from absl import logging\n')] |
"""Functions to enable BMS transformations of waveforms
BMS transformations include the usual Poincaré group — time and space
translations, rotations, and boosts — as well as "supertranslations", which are
a more general form of translations. Essentially, a supertranslation is a
direction-dependent time translation.
See https://arxiv.org/abs/1509.00862 for a review of BMS transformations and
their computation.
"""
def uprime_generator(u, β):
    """Build the retarded-time grid u' for a frame boosted with speed β.

    Parameters
    ----------
    u : array_like
        Time steps in the original (stationary) frame
    β : float
        Magnitude of boost velocity as fraction of the speed of light

    Returns
    -------
    uprime : array_like
        Time steps in the boosted frame

    Notes
    -----
    A boosted slice of constant u' samples a range of stationary-frame slices,
    whose time steps may differ because the input grid is adaptive.  The grid
    returned here therefore advances by the smaller of the step sizes found on
    the earliest- and latest-sampled stationary slices (the two extremes of
    the Doppler factor, along and against the boost), which is an adequate
    approximation for smoothly varying input grids.

    Raises
    ------
    ValueError
        If no complete u' slice fits inside the span of the input `u`.
    """
    import numpy as np
    from scipy.interpolate import CubicSpline

    lorentz = 1 / np.sqrt(1 - β**2)
    # Extreme Doppler stretch factors: u = u' * stretch along (∓) the boost.
    stretch_minus = lorentz * (1 - β)
    stretch_plus = lorentz * (1 + β)
    # A u' slice is complete only if both extremes lie inside [min(u), max(u)].
    uprime_min = max(min(u) / stretch_minus, min(u) / stretch_plus)
    uprime_max = min(max(u) / stretch_minus, max(u) / stretch_plus)
    if uprime_max < uprime_min:
        raise ValueError(
            f"\n\tThere are no complete slices in the u' coordinate system for u ∈ [{min(u)}, {max(u)}] and β = {β}."
            f"\n\tYou may wish to decrease β or move the origin of the time coordinate closer to (u[0] + u[-1]) / 2."
        )
    # Splines mapping a stationary-frame time to the local u' step size on
    # the along-boost (plus) and against-boost (minus) extremes.
    step_plus = CubicSpline(u[1:], np.diff(u / stretch_plus), extrapolate=True)
    step_minus = CubicSpline(u[1:], np.diff(u / stretch_minus), extrapolate=True)
    grid = [uprime_min]
    while grid[-1] < uprime_max:
        latest = grid[-1]
        δ = min(
            step_plus(latest * stretch_plus),
            step_minus(latest * stretch_minus)
        )
        grid.append(min(uprime_max, latest + δ))
    return np.array(grid)
def Bprime(v⃗, n̂prime):
    """Rotor of aberration spin-weighted fields under boosts

    Implements Equation (2) of arxiv.org/abs/1509.00862

    Parameters
    ----------
    v⃗ : (3,) array_like
        Three-vector representing the velocity of the boosted frame relative to the
        inertial frame, in units where the speed of light is 1
    n̂prime : (..., 3) array_like
        Three-vectors representing the directions in the boosted frame

    Returns
    -------
    Bprm : (..., 4) quaternionic.array
        Quaternions that rotate from the boosted frame to the stationary frame. In
        addition to rotating n̂prime onto the corresponding n̂ directions, this will
        also rotate tangent vectors appropriately, to account for spin
        transformations. The shape of this array is n̂prime.shape[:-1]+(4,).
    """
    import numpy as np
    import quaternionic
    ϵ = 1e-15  # speeds below this are treated as no boost at all
    β = np.linalg.norm(v⃗)
    if β < ϵ:
        # NOTE(review): this returns a single scalar quaternion rather than an
        # array of shape n̂prime.shape[:-1]+(4,) as documented above; it
        # broadcasts in quaternion arithmetic, but confirm no caller relies on
        # the documented shape.
        return quaternionic.one
    v̂ = v⃗ / β
    φ = np.arctanh(β)  # rapidity of the boost
    # Polar angles from the boost axis: Θprime in the boosted frame, Θ its
    # aberrated counterpart (Eq. (2) of the reference).
    Θprime = np.arccos(np.tensordot(v̂, n̂prime, axes=[-1, -1]))
    Θ = 2 * np.arctan(np.exp(-φ) * np.tan(Θprime/2))
    # Rotation axis, perpendicular to both n̂prime and v̂.
    # NOTE(review): when n̂prime is (anti-)parallel to v̂, nvnorm is zero and
    # the normalization below produces NaNs -- confirm inputs avoid that case.
    nv = np.cross(n̂prime, v̂)
    nvnorm = np.linalg.norm(nv, axis=-1)
    nv /= nvnorm[..., np.newaxis]
    # exp of the pure-vector quaternion gives a rotor about `nv` through the
    # aberration angle (Θprime - Θ).
    return np.exp(((Θprime - Θ) / 2) * quaternionic.array.from_vector_part(nv))
def boost(w, v⃗, ell_max):
    """Find modes of waveform boosted by velocity v⃗

    Implements Equation (21) of arxiv.org/abs/1509.00862

    Parameters
    ----------
    w : WaveformModes
        Modes of waveform measured in original frame
    v⃗ : array_like
        Three-vector representing the velocity of the boosted frame relative to the
        inertial frame, in units where the speed of light is 1
    ell_max : int
        Maximum value of `ell` to use while computing the transformation, and to
        provide in the returned object

    Returns
    -------
    wprime : WaveformModes
        Modes of waveform measured in boosted frame or of modes from boosted source
        measured in original frame.  This should have the same properties as the
        input `w`, except with (1) different time data [see Notes, below], (2) a
        minimum `ell` value of 0 even for spin weight other than 0, and (3) a
        maximum `ell` value of `ell_max`.

    Notes
    -----
    Due to the nature of the transformation, some of the information in the input
    waveform must be discarded, because it corresponds to slices of the output
    waveform that are not completely represented in the input.  Thus, the times of
    the output waveform will not just be the Lorentz-transformed times of the input
    waveform.

    Depending on the magnitude β=|v⃗|, a very large value of `ell_max` may be
    needed.  The dominant factor is the translation that builds up over time:
    `β*T`, where `T` is the largest time found in the waveform.  For example, if
    β*T ≈ 1000M, we might need `ell_max=64` to maintain a comparable accuracy as in
    the input data.

    Because of the `β*T` effects, it is usually best to set t=0 at the merger time
    — best approximated as `self.max_norm_time()`.  The largest translation is then
    found early in the waveform, when the waveform is changing slowly.
    """
    import numpy as np
    import quaternionic
    import spherical
    import spinsfast

    if w.data_type.lower() not in ['h', 'psi4']:
        raise NotImplementedError(f"Input waveform `w` has type {w.data_type}, which is not yet implemented")

    ϵ = 1e-15
    β = np.linalg.norm(v⃗)
    if β < ϵ:
        # No boost: return an unmodified copy.
        return w.copy()
    γ = 1 / np.sqrt(1 - β**2)

    # Evaluation grid in the boosted frame; (2*ell_max + 1) points per angular
    # direction are enough for the mode decomposition below.
    # Bug fix: the original bound the grid to `θprimeϕprime` (U+03D5 GREEK PHI
    # SYMBOL) but then read `θprimeφprime` (U+03C6 GREEK SMALL LETTER PHI) —
    # two distinct identifiers, raising NameError at runtime.  One spelling is
    # used consistently here.  (The unused locals `φ = np.arctanh(β)` and
    # `v̂ = v⃗ / β` have also been removed.)
    nθprime = nφprime = 2 * ell_max + 1
    θprimeφprime = spherical.theta_phi(nθprime, nφprime)
    Rprime = quaternionic.array.from_spherical_coordinates(θprimeφprime)
    n̂prime = (Rprime * quaternionic.z * Rprime.inverse).vector
    # Rotors carrying the boosted-frame grid directions back to the
    # stationary frame, including the spin transformation (see Bprime).
    R = Bprime(v⃗, n̂prime) * Rprime
    n̂ = (R * quaternionic.z * R.inverse).vector
    doppler_factor = γ * (1 - np.tensordot(v⃗, n̂, axes=[-1, -1]))
    uprime = uprime_generator(w.t, β)
    time_axis, modes_axis = 0, 1
    Hprime = np.zeros((uprime.size, spherical.Ysize(0, ell_max)), dtype=complex)

    # Step through the waveform in segments, so that each segment is small enough to fit
    # comfortably into memory, but large enough to minimize re-computation of SWSHs
    i_step_size = 5_000
    i_padding = 20
    i_outer_1 = 0
    i_inner_1 = 0
    i_inner_2 = min(i_inner_1 + i_step_size, uprime.size)
    i_outer_2 = min(i_inner_2 + i_padding, uprime.size)
    Hprime_grid = np.zeros((i_step_size, nθprime, nφprime), dtype=complex)
    while True:
        uprime_outer = uprime[i_outer_1:i_outer_2]
        uprime_inner = uprime[i_inner_1:i_inner_2]
        # Within each segment, this is the core computation, evaluating the transformed
        # field on a grid, and then converting back to mode weights
        for j in range(Rprime.shape[0]):
            for k in range(Rprime.shape[1]):
                Rjk = R[j, k]
                doppler_factor_jk = doppler_factor[j, k]
                # Stationary-frame time span needed for this grid direction.
                u_outer_1, u_outer_2 = uprime_outer[[0, -1]] * doppler_factor_jk
                i1, i2 = w.index_closest_to(u_outer_1), w.index_closest_to(u_outer_2)
                Hprime_grid[:i_inner_2-i_inner_1, j, k] = (
                    doppler_factor_jk * w[i1:i2].evaluate(Rjk).interpolate(uprime_inner * doppler_factor_jk)
                )
        Hprime[i_inner_1:i_inner_2] = spinsfast.map2salm(Hprime_grid[:i_inner_2-i_inner_1], w.spin_weight, ell_max)
        # Move to the next segment
        if i_inner_2 == uprime.size:
            break
        i_inner_1 = i_inner_2
        i_outer_1 = max(0, i_inner_1 - i_padding)
        i_inner_2 = min(i_inner_2 + i_step_size, uprime.size)
        i_outer_2 = min(i_inner_2 + i_padding, uprime.size)

    return type(w)(
        Hprime, time=uprime, time_axis=time_axis, modes_axis=modes_axis,
        ell_min=0, ell_max=ell_max, data_type=w.data_type, spin_weight=w.spin_weight
    )
| [
"numpy.arctanh",
"spinsfast.map2salm",
"numpy.tensordot",
"numpy.cross",
"numpy.zeros",
"quaternionic.array.from_vector_part",
"numpy.diff",
"numpy.linalg.norm",
"numpy.array",
"numpy.exp",
"spherical.Ysize",
"numpy.tan",
"quaternionic.array.from_spherical_coordinates",
"spherical.theta_ph... | [((2733, 2749), 'numpy.array', 'np.array', (['uprime'], {}), '(uprime)\n', (2741, 2749), True, 'import numpy as np\n'), ((3682, 3700), 'numpy.linalg.norm', 'np.linalg.norm', (['v⃗'], {}), '(v⃗)\n', (3696, 3700), True, 'import numpy as np\n'), ((3771, 3784), 'numpy.arctanh', 'np.arctanh', (['β'], {}), '(β)\n', (3781, 3784), True, 'import numpy as np\n'), ((3911, 3932), 'numpy.cross', 'np.cross', (['n̂prime', 'v̂'], {}), '(n̂prime, v̂)\n', (3919, 3932), True, 'import numpy as np\n'), ((3946, 3973), 'numpy.linalg.norm', 'np.linalg.norm', (['nv'], {'axis': '(-1)'}), '(nv, axis=-1)\n', (3960, 3973), True, 'import numpy as np\n'), ((6283, 6301), 'numpy.linalg.norm', 'np.linalg.norm', (['v⃗'], {}), '(v⃗)\n', (6297, 6301), True, 'import numpy as np\n'), ((6380, 6393), 'numpy.arctanh', 'np.arctanh', (['β'], {}), '(β)\n', (6390, 6393), True, 'import numpy as np\n'), ((6471, 6508), 'spherical.theta_phi', 'spherical.theta_phi', (['nθprime', 'nφprime'], {}), '(nθprime, nφprime)\n', (6490, 6508), False, 'import spherical\n'), ((6520, 6579), 'quaternionic.array.from_spherical_coordinates', 'quaternionic.array.from_spherical_coordinates', (['θprimeφprime'], {}), '(θprimeφprime)\n', (6565, 6579), False, 'import quaternionic\n'), ((7336, 7392), 'numpy.zeros', 'np.zeros', (['(i_step_size, nθprime, nφprime)'], {'dtype': 'complex'}), '((i_step_size, nθprime, nφprime), dtype=complex)\n', (7344, 7392), True, 'import numpy as np\n'), ((1832, 1851), 'numpy.sqrt', 'np.sqrt', (['(1 - β ** 2)'], {}), '(1 - β ** 2)\n', (1839, 1851), True, 'import numpy as np\n'), ((2357, 2383), 'numpy.diff', 'np.diff', (['(u / (γ * (1 + β)))'], {}), '(u / (γ * (1 + β)))\n', (2364, 2383), True, 'import numpy as np\n'), ((2442, 2468), 'numpy.diff', 'np.diff', (['(u / (γ * (1 - β)))'], {}), '(u / (γ * (1 - β)))\n', (2449, 2468), True, 'import numpy as np\n'), ((3808, 3848), 'numpy.tensordot', 'np.tensordot', (['v̂', 'n̂prime'], {'axes': '[-1, -1]'}), '(v̂, n̂prime, axes=[-1, -1])\n', 
(3820, 3848), True, 'import numpy as np\n'), ((6354, 6373), 'numpy.sqrt', 'np.sqrt', (['(1 - β ** 2)'], {}), '(1 - β ** 2)\n', (6361, 6373), True, 'import numpy as np\n'), ((8314, 8393), 'spinsfast.map2salm', 'spinsfast.map2salm', (['Hprime_grid[:i_inner_2 - i_inner_1]', 'w.spin_weight', 'ell_max'], {}), '(Hprime_grid[:i_inner_2 - i_inner_1], w.spin_weight, ell_max)\n', (8332, 8393), False, 'import spinsfast\n'), ((4049, 4088), 'quaternionic.array.from_vector_part', 'quaternionic.array.from_vector_part', (['nv'], {}), '(nv)\n', (4084, 4088), False, 'import quaternionic\n'), ((6762, 6797), 'numpy.tensordot', 'np.tensordot', (['v⃗', 'n̂'], {'axes': '[-1, -1]'}), '(v⃗, n̂, axes=[-1, -1])\n', (6774, 6797), True, 'import numpy as np\n'), ((6906, 6933), 'spherical.Ysize', 'spherical.Ysize', (['(0)', 'ell_max'], {}), '(0, ell_max)\n', (6921, 6933), False, 'import spherical\n'), ((3872, 3882), 'numpy.exp', 'np.exp', (['(-φ)'], {}), '(-φ)\n', (3878, 3882), True, 'import numpy as np\n'), ((3886, 3904), 'numpy.tan', 'np.tan', (['(Θprime / 2)'], {}), '(Θprime / 2)\n', (3892, 3904), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from nltk.tokenize.treebank import TreebankWordDetokenizer
latex_special_token = ["!@#$%^&*()"]
def generate(text_list, attention_list, latex_file, color='red', rescale_value=False):
    """Write a standalone LaTeX document rendering each word in a colour box.

    Each word of *text_list* is placed in a ``\\colorbox`` whose saturation
    is the matching value of *attention_list*, interpreted as a percentage
    of *color*.

    Args:
        text_list: words to display, one box per word.
        attention_list: one numeric weight per word (expected in [0, 100]).
        latex_file: output path for the generated .tex file.
        color: base LaTeX colour name for the boxes.
        rescale_value: if True, linearly rescale the weights onto [0, 100]
            with `rescale` before rendering.
    """
    assert(len(text_list) == len(attention_list))
    if rescale_value:
        attention_list = rescale(attention_list)
    words = clean_word(text_list)
    preamble = r'''\documentclass[varwidth]{standalone}
\special{papersize=210mm,297mm}
\usepackage{color}
\usepackage{tcolorbox}
\usepackage{CJK}
\usepackage{adjustbox}
\tcbset{width=0.9\textwidth,boxrule=0pt,colback=red,arc=0pt,auto outer arc,left=0pt,right=0pt,boxsep=5pt}
\begin{document}
\begin{CJK*}{UTF8}{gbsn}''' + '\n'
    opener = r'''{\setlength{\fboxsep}{0pt}\colorbox{white!0}{\parbox{0.9\textwidth}{''' + "\n"
    boxes = "".join(
        "\\colorbox{%s!%s}{" % (color, weight) + "\\strut " + word + "} "
        for word, weight in zip(words, attention_list)
    )
    closing = r'''\end{CJK*}
\end{document}'''
    with open(latex_file, 'w') as f:
        f.write(preamble)
        f.write(opener + boxes + "\n}}}" + '\n')
        f.write(closing)
def rescale(input_list):
    """Linearly rescale *input_list* onto the range [0, 100].

    Args:
        input_list: sequence of numeric values (e.g. attention scores).

    Returns:
        A list of floats, where the minimum input maps to 0 and the maximum
        to 100.  If all inputs are equal, a list of zeros is returned.
        (Previously this case divided 0/0 and produced NaNs.)
    """
    the_array = np.asarray(input_list)
    the_max = np.max(the_array)
    the_min = np.min(the_array)
    span = the_max - the_min
    if span == 0:
        # Degenerate input (all values identical): avoid 0/0 -> NaN.
        return np.zeros_like(the_array, dtype=float).tolist()
    rescale = (the_array - the_min)/span*100
    return rescale.tolist()
def clean_word(word_list):
    """Return a copy of *word_list* with LaTeX-sensitive characters escaped.

    Each of the characters ``\\ % & ^ # _ { }`` is prefixed with a
    backslash.  The translation happens in a single pass per word, so the
    backslashes introduced by the escaping are never re-escaped (matching
    the original, which escaped the backslash first).
    """
    escape_table = str.maketrans({ch: "\\" + ch for ch in "\\%&^#_{}"})
    return [word.translate(escape_table) for word in word_list]
if __name__ == '__main__':
    # Demo: visualize shuffled attention weights over the first training sentence.
    df = pd.read_json('train.json', lines=True)
    sent = TreebankWordDetokenizer().detokenize(df['text'][0])
    words = sent.split()
    word_num = len(words)
    # Bug fix: the attention vector used to be `np.zeros(word_num)`, so every
    # word was rendered at 0% colour and the shuffle was a no-op.  Use the
    # intended linear ramp of weights in (0, 100] (see the formula that was
    # left commented out in the original), then shuffle it deterministically.
    attention = [(x + 1.) / word_num * 100 for x in range(word_num)]
    import random
    random.seed(42)
    random.shuffle(attention)
    color = 'red'
    generate(words, attention, "sample.tex", color)
| [
"random.shuffle",
"numpy.asarray",
"numpy.zeros",
"pandas.read_json",
"numpy.max",
"numpy.min",
"random.seed",
"nltk.tokenize.treebank.TreebankWordDetokenizer"
] | [((1185, 1207), 'numpy.asarray', 'np.asarray', (['input_list'], {}), '(input_list)\n', (1195, 1207), True, 'import numpy as np\n'), ((1222, 1239), 'numpy.max', 'np.max', (['the_array'], {}), '(the_array)\n', (1228, 1239), True, 'import numpy as np\n'), ((1254, 1271), 'numpy.min', 'np.min', (['the_array'], {}), '(the_array)\n', (1260, 1271), True, 'import numpy as np\n'), ((1748, 1786), 'pandas.read_json', 'pd.read_json', (['"""train.json"""'], {'lines': '(True)'}), "('train.json', lines=True)\n", (1760, 1786), True, 'import pandas as pd\n'), ((1977, 1995), 'numpy.zeros', 'np.zeros', (['word_num'], {}), '(word_num)\n', (1985, 1995), True, 'import numpy as np\n'), ((2018, 2033), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (2029, 2033), False, 'import random\n'), ((2038, 2063), 'random.shuffle', 'random.shuffle', (['attention'], {}), '(attention)\n', (2052, 2063), False, 'import random\n'), ((1798, 1823), 'nltk.tokenize.treebank.TreebankWordDetokenizer', 'TreebankWordDetokenizer', ([], {}), '()\n', (1821, 1823), False, 'from nltk.tokenize.treebank import TreebankWordDetokenizer\n')] |
import random
import unittest
import numpy as np
from scipy.sparse import coo_matrix
from rlscore.learner import CGRankRLS
from rlscore.learner.cg_rankrls import PCGRankRLS
from rlscore.learner import QueryRankRLS
class Test(unittest.TestCase):
    """Regression tests for the conjugate-gradient RankRLS learners.

    Each test compares the iterative CG solution against the closed-form
    RankRLS solution (or against another learner) on small random data.
    """

    def setUp(self):
        # Fixed seeds so the randomly generated data is reproducible.
        np.random.seed(100)
        random.seed(100)

    def testOrdinalRegression(self):
        """CGRankRLS trained on scores must match the closed-form solution."""
        m, n = 100, 300
        for regparam in [0.00000001, 1, 100000000]:
            Xtrain = np.mat(np.random.rand(n, m))
            Y = np.mat(np.random.rand(m, 1))
            rpool = {}
            rpool['X'] = Xtrain.T
            rpool['Y'] = Y
            rpool['regparam'] = regparam
            rpool["bias"] = 1.0
            rls = CGRankRLS(**rpool)
            model = rls.predictor
            W = model.W
            In = np.mat(np.identity(n))
            Im = np.mat(np.identity(m))
            # Centering matrix of the all-pairs ranking graph Laplacian.
            L = np.mat(Im-(1./m)*np.ones((m,m), dtype=np.float64))
            # Closed-form RankRLS: W = (X L X' + λI)^-1 X L Y.
            G = Xtrain*L*Xtrain.T+regparam*In
            W2 = np.squeeze(np.array(G.I*Xtrain*L*Y))
            for i in range(W.shape[0]):
                self.assertAlmostEqual(W[i], W2[i], places = 5)

    def testPairwisePreferences(self):
        """CGRankRLS trained on pairwise preferences must match the closed form."""
        m, n = 100, 300
        for regparam in [0.00000001, 1, 100000000]:
            Xtrain = np.mat(np.random.rand(n, m))
            Y = np.mat(np.random.rand(m, 1))
            # Sample preference pairs (winner, loser) consistent with Y.
            pairs = []
            for i in range(1000):
                a = random.randint(0, m - 1)
                b = random.randint(0, m - 1)
                if Y[a] > Y[b]:
                    pairs.append((a, b))
                else:
                    pairs.append((b, a))
            pairs = np.array(pairs)
            rpool = {}
            rpool['X'] = Xtrain.T
            rpool['train_preferences'] = pairs
            rpool['regparam'] = regparam
            rpool["bias"] = 1.0
            rls = PCGRankRLS(**rpool)
            model = rls.predictor
            W = model.W
            In = np.mat(np.identity(n))
            Im = np.mat(np.identity(m))
            # Incidence matrix of the preference graph: +1 winner, -1 loser.
            vals = np.concatenate([np.ones((pairs.shape[0]), dtype=np.float64), -np.ones((pairs.shape[0]), dtype=np.float64)])
            row = np.concatenate([np.arange(pairs.shape[0]),np.arange(pairs.shape[0])])
            col = np.concatenate([pairs[:,0], pairs[:,1]])
            coo = coo_matrix((vals, (row, col)), shape=(pairs.shape[0], Xtrain.T.shape[0]))
            L = (coo.T*coo).todense()
            G = Xtrain*L*Xtrain.T+regparam*In
            W2 = np.squeeze(np.array(G.I*Xtrain*coo.T*np.mat(np.ones((pairs.shape[0],1)))))
            for i in range(W.shape[0]):
                self.assertAlmostEqual(W[i], W2[i], places=4)

    def testQueryData(self):
        """QueryRankRLS and CGRankRLS must agree on query-structured data."""
        np.random.seed(100)
        floattype = np.float64
        m, n = 100, 400 #data, features
        Xtrain = np.mat(np.random.rand(m, n))
        Y = np.mat(np.zeros((m, 1), dtype=floattype))
        Y[:, 0] = np.sum(Xtrain, 1)
        # Partition the 100 examples into 6 queries of varying sizes.
        qidlist = [0 for i in range(100)]
        for h in range(5, 12):
            qidlist[h] = 1
        for h in range(12, 32):
            qidlist[h] = 2
        for h in range(32, 34):
            qidlist[h] = 3
        for h in range(34, 85):
            qidlist[h] = 4
        for h in range(85, 100):
            qidlist[h] = 5
        kwargs = {}
        kwargs['X'] = Xtrain
        kwargs['Y'] = Y
        kwargs['qids'] = qidlist
        kwargs['regparam'] = 1.0
        learner1 = QueryRankRLS(**kwargs)
        learner2 = CGRankRLS(**kwargs)
        # Bug fix: the original computed np.max(1. - W1/W2) without abs(), so
        # deviations where W1/W2 > 1 were negative and went undetected; it
        # also signalled failure with a bare `assert False` instead of a
        # unittest assertion.
        mdiff = np.max(np.abs(1. - learner1.predictor.W / learner2.predictor.W))
        self.assertLess(mdiff, 0.01)
assert False | [
"numpy.random.seed",
"numpy.sum",
"random.randint",
"rlscore.learner.CGRankRLS",
"rlscore.learner.cg_rankrls.PCGRankRLS",
"numpy.zeros",
"numpy.identity",
"numpy.ones",
"rlscore.learner.QueryRankRLS",
"numpy.max",
"scipy.sparse.coo_matrix",
"random.seed",
"numpy.array",
"numpy.arange",
"... | [((283, 302), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (297, 302), True, 'import numpy as np\n'), ((311, 327), 'random.seed', 'random.seed', (['(100)'], {}), '(100)\n', (322, 327), False, 'import random\n'), ((3061, 3080), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (3075, 3080), True, 'import numpy as np\n'), ((3270, 3287), 'numpy.sum', 'np.sum', (['Xtrain', '(1)'], {}), '(Xtrain, 1)\n', (3276, 3287), True, 'import numpy as np\n'), ((3783, 3805), 'rlscore.learner.QueryRankRLS', 'QueryRankRLS', ([], {}), '(**kwargs)\n', (3795, 3805), False, 'from rlscore.learner import QueryRankRLS\n'), ((3825, 3844), 'rlscore.learner.CGRankRLS', 'CGRankRLS', ([], {}), '(**kwargs)\n', (3834, 3844), False, 'from rlscore.learner import CGRankRLS\n'), ((3861, 3918), 'numpy.max', 'np.max', (['(1.0 - learner1.predictor.W / learner2.predictor.W)'], {}), '(1.0 - learner1.predictor.W / learner2.predictor.W)\n', (3867, 3918), True, 'import numpy as np\n'), ((754, 772), 'rlscore.learner.CGRankRLS', 'CGRankRLS', ([], {}), '(**rpool)\n', (763, 772), False, 'from rlscore.learner import CGRankRLS\n'), ((1840, 1855), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (1848, 1855), True, 'import numpy as np\n'), ((2079, 2098), 'rlscore.learner.cg_rankrls.PCGRankRLS', 'PCGRankRLS', ([], {}), '(**rpool)\n', (2089, 2098), False, 'from rlscore.learner.cg_rankrls import PCGRankRLS\n'), ((2473, 2515), 'numpy.concatenate', 'np.concatenate', (['[pairs[:, 0], pairs[:, 1]]'], {}), '([pairs[:, 0], pairs[:, 1]])\n', (2487, 2515), True, 'import numpy as np\n'), ((2532, 2605), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(vals, (row, col))'], {'shape': '(pairs.shape[0], Xtrain.T.shape[0])'}), '((vals, (row, col)), shape=(pairs.shape[0], Xtrain.T.shape[0]))\n', (2542, 2605), False, 'from scipy.sparse import coo_matrix\n'), ((3176, 3196), 'numpy.random.rand', 'np.random.rand', (['m', 'n'], {}), '(m, n)\n', (3190, 3196), True, 'import numpy as np\n'), 
((3217, 3250), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {'dtype': 'floattype'}), '((m, 1), dtype=floattype)\n', (3225, 3250), True, 'import numpy as np\n'), ((512, 532), 'numpy.random.rand', 'np.random.rand', (['n', 'm'], {}), '(n, m)\n', (526, 532), True, 'import numpy as np\n'), ((557, 577), 'numpy.random.rand', 'np.random.rand', (['m', '(1)'], {}), '(m, 1)\n', (571, 577), True, 'import numpy as np\n'), ((858, 872), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (869, 872), True, 'import numpy as np\n'), ((898, 912), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (909, 912), True, 'import numpy as np\n'), ((1055, 1085), 'numpy.array', 'np.array', (['(G.I * Xtrain * L * Y)'], {}), '(G.I * Xtrain * L * Y)\n', (1063, 1085), True, 'import numpy as np\n'), ((1457, 1477), 'numpy.random.rand', 'np.random.rand', (['n', 'm'], {}), '(n, m)\n', (1471, 1477), True, 'import numpy as np\n'), ((1502, 1522), 'numpy.random.rand', 'np.random.rand', (['m', '(1)'], {}), '(m, 1)\n', (1516, 1522), True, 'import numpy as np\n'), ((1614, 1638), 'random.randint', 'random.randint', (['(0)', '(m - 1)'], {}), '(0, m - 1)\n', (1628, 1638), False, 'import random\n'), ((1659, 1683), 'random.randint', 'random.randint', (['(0)', '(m - 1)'], {}), '(0, m - 1)\n', (1673, 1683), False, 'import random\n'), ((2184, 2198), 'numpy.identity', 'np.identity', (['n'], {}), '(n)\n', (2195, 2198), True, 'import numpy as np\n'), ((2224, 2238), 'numpy.identity', 'np.identity', (['m'], {}), '(m)\n', (2235, 2238), True, 'import numpy as np\n'), ((2275, 2316), 'numpy.ones', 'np.ones', (['pairs.shape[0]'], {'dtype': 'np.float64'}), '(pairs.shape[0], dtype=np.float64)\n', (2282, 2316), True, 'import numpy as np\n'), ((2401, 2426), 'numpy.arange', 'np.arange', (['pairs.shape[0]'], {}), '(pairs.shape[0])\n', (2410, 2426), True, 'import numpy as np\n'), ((2427, 2452), 'numpy.arange', 'np.arange', (['pairs.shape[0]'], {}), '(pairs.shape[0])\n', (2436, 2452), True, 'import numpy as np\n'), ((947, 980), 
'numpy.ones', 'np.ones', (['(m, m)'], {'dtype': 'np.float64'}), '((m, m), dtype=np.float64)\n', (954, 980), True, 'import numpy as np\n'), ((2321, 2362), 'numpy.ones', 'np.ones', (['pairs.shape[0]'], {'dtype': 'np.float64'}), '(pairs.shape[0], dtype=np.float64)\n', (2328, 2362), True, 'import numpy as np\n'), ((2751, 2779), 'numpy.ones', 'np.ones', (['(pairs.shape[0], 1)'], {}), '((pairs.shape[0], 1))\n', (2758, 2779), True, 'import numpy as np\n')] |
import argparse
import csv
import glob
import json
import time
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
Land_cover_Classes = {
'Mixed forest': 0,
'Coniferous forest': 1,
'Non-irrigated arable land': 2,
'Transitional woodland/shrub': 3,
'Broad-leaved forest': 4,
'Land principally occupied by agriculture, with significant areas of natural vegetation': 5,
'Complex cultivation patterns': 6,
'Pastures': 7,
'Water bodies': 8,
'Sea and ocean': 9,
'Discontinuous urban fabric':10,
'Agro-forestry areas': 11,
'Peatbogs': 12,
'Permanently irrigated land': 13,
'Industrial or commercial units': 14,
'Natural grassland': 15,
'Olive groves': 16,
'Sclerophyllous vegetation': 17,
'Continuous urban fabric': 18,
'Water courses': 19,
'Vineyards': 20,
'Annual crops associated with permanent crops': 21,
'Inland marshes': 22,
'Moors and heathland': 23,
'Sport and leisure facilities': 24,
'Fruit trees and berry plantations': 25,
'Mineral extraction sites': 26,
'Rice fields': 27,
'Road and rail networks and associated land': 28,
'Bare rock': 29,
'Green urban areas': 30,
'Beaches, dunes, sands': 31,
'Sparsely vegetated areas': 32,
'Salt marshes': 33,
'Coastal lagoons': 34,
'Construction sites': 35,
'Estuaries': 36,
'Intertidal flats': 37,
'Airports': 38,
'Dump sites': 39,
'Port areas': 40,
'Salines': 41,
'Burnt areas': 42
}
class BigEarthUtils:
def __init__(self):
pass
@staticmethod
def big_earth_to_csv(big_e_path: str, num_samples: int, csv_filename: str) -> True:
"""
Function which generate the csv file of all or a portion of the BigEarth dataset
:param big_e_path: path to BigEarth dataset
:param num_samples: number of samples to consider in the creation of the csv file (-1 to select all dataset)
:param csv_filename: name of the created file
:return: True
"""
path = Path(big_e_path)
print("collecting dirs...")
start_time = time.time()
labels_names = []
labels_values = []
if num_samples == -1:
dirs = [str(e) for e in path.iterdir() if e.is_dir()]
else:
# zip and range() to choose only a specific number of example
dirs = [str(e) for _, e in zip(range(num_samples), path.iterdir()) if e.is_dir()]
for idx, d in enumerate(dirs):
for e in glob.glob(d + "/*.json"):
with open(e) as f:
j_file = json.load(f)
labels_names.append(j_file['labels'])
labels_values.append([Land_cover_Classes[label] for label in j_file['labels']])
# write the dirs on a csv file
print("writing on csv...")
things_to_write = zip(dirs, labels_names, labels_values)
with open(csv_filename, "w") as f:
writer = csv.writer(f)
writer.writerows(things_to_write)
print(f"finishing in : {time.time() - start_time}")
return True
@staticmethod
def min_max_quantile(csv_filename: str, n_samples: int) -> dict:
"""
Function that compute the
:param csv_filename: path of the csv_filename of the BigEarth dataset
:param n_samples: number of samples to use for calculate the min and max quantile
:return: a dict containing min and max quantiles for every sentinel-2 bands
"""
bands = ["B01", "B02", "B03", "B04", "B05", "B06", "B07", "B08", "B8A", "B09", "B11", "B12"]
data = pd.read_csv(csv_filename, header=None)
paths = data.iloc[:, 0].tolist()
quantiles = {}
for b in bands:
imgs = []
for i in range(n_samples):
path = paths[i] # i choose the i-th path of the list
for filename in glob.iglob(path + "/*" + b + ".tif"):
img = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
imgs.append(img)
imgs = np.stack(imgs, axis=0).reshape(-1)
quantiles[b] = {
'min_q': np.quantile(imgs, 0.02),
'max_q': np.quantile(imgs, 0.98)
}
print(b, quantiles[b])
return quantiles
@staticmethod
def save_dict_to_json(d: dict, json_path: str) -> None:
with open(json_path, 'w') as f:
json.dump(d, f, indent=4)
@staticmethod
def delete_patches(csv_filename: str) -> True:
"""
Function which delete images covered by cloud or snow
:param csv_filename: Dataset file created above
:return: True
"""
csv_snow_patches = 'patches_with_seasonal_snow.csv'
csv_clouds_patches = 'patches_with_cloud_and_shadow.csv'
data = pd.read_csv(csv_filename, header=None)
snow_patches = pd.read_csv(Path.cwd() / csv_snow_patches, header=None)
clouds_patches = pd.read_csv(Path.cwd() / csv_clouds_patches, header=None)
patches = snow_patches.iloc[:, 0].tolist() + clouds_patches.iloc[:, 0].tolist()
df = data[~data.iloc[:, 0].str.contains('|'.join(patches))]
df.to_csv(csv_filename[:-4] + '_no_clouds_and_snow_server' + csv_filename[-4:], header=None, index=False)
return True
@staticmethod
def delete_patches_v2(csv_filename: str) -> True:
"""
Function which delete images covered by cloud or snow
:param csv_filename: Dataset file created above
:return: True
"""
data = pd.read_csv(csv_filename, header=None)
data_copy = data.copy()
data_copy = data_copy.replace({"/nas/softechict-nas-2/svincenzi/BigEarthNet-v1.0/": ""}, regex=True)
csv_snow_patches = 'patches_with_seasonal_snow.csv'
csv_clouds_patches = 'patches_with_cloud_and_shadow.csv'
snow_patches = pd.read_csv(Path.cwd() / csv_snow_patches, header=None)
clouds_patches = pd.read_csv(Path.cwd() / csv_clouds_patches, header=None)
patches = snow_patches.iloc[:, 0].tolist() + clouds_patches.iloc[:, 0].tolist()
data = data[~data_copy.iloc[:, 0].isin(patches)]
data.to_csv(csv_filename[:-4] + '_no_clouds_and_snow_v2' + csv_filename[-4:], header=None, index=False)
return True
@staticmethod
def replace_path_csv(csv_filename: str, new_path: str) -> True:
"""
function that change the path in the csv file
:param csv_filename: Dataset file
:param new_path: new path to set in the csv file
:return: True
"""
data = pd.read_csv(csv_filename, header=None)
data = data.replace({"/nas/softechict-nas-2/svincenzi/BigEarthNet-v1.0/": new_path}, regex=True)
data.to_csv(csv_filename[:-4] + '_new_path' + csv_filename[-4:], header=None, index=False)
return True
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='BigEarthNet utils')
argparser.add_argument('--big_e_path', type=str, default=None, required=True, help='path to the BigEarth dataset')
argparser.add_argument('--num_samples', type=int, default=-1, help='Number of samples to create the csv file')
argparser.add_argument('--csv_filename', type=str, default='BigEarth.csv', help='Name of the csv dataset file')
argparser.add_argument('--n_samples', type=int, default=3000, help='Number of samples to calculate the min-max quantile')
argparser.add_argument('--mode', default='csv_creation', choices=['csv_creation', 'delete_patches', 'delete_patches_v2',
'quantiles', 'replace_path_csv'],
type=str, help='select the action to perform: csv_creation, delete_patches, '
'delete_patches_v2, quantiles or replace_path_csv')
argparser.add_argument('--new_path_csv', type=str, default=None, help='indicate the new path to change the csv')
args = argparser.parse_args()
# csv creation
if args.mode == 'csv_creation':
BigEarthUtils.big_earth_to_csv(args.big_e_path, args.num_samples, args.csv_filename)
# delete patches
elif args.mode == 'delete_patches':
BigEarthUtils.delete_patches(args.csv_filename)
# delete patches_v2
elif args.mode == 'delete_patches_v2':
BigEarthUtils.delete_patches_v2(args.csv_filename)
# min-max quantiles
elif args.mode == 'quantiles':
quantiles = BigEarthUtils.min_max_quantile(args.csv_filename, args.n_samples)
# save the quantiles on a json file
BigEarthUtils.save_dict_to_json(quantiles, f"quantiles_{args.n_samples}.json")
# replace csv path
elif args.mode == 'replace_path_csv':
BigEarthUtils.replace_path_csv(args.csv_filename, args.new_path_csv)
| [
"numpy.stack",
"json.dump",
"numpy.quantile",
"json.load",
"argparse.ArgumentParser",
"csv.writer",
"pandas.read_csv",
"time.time",
"cv2.imread",
"pathlib.Path",
"glob.glob",
"glob.iglob",
"pathlib.Path.cwd"
] | [((6974, 7030), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""BigEarthNet utils"""'}), "(description='BigEarthNet utils')\n", (6997, 7030), False, 'import argparse\n'), ((2066, 2082), 'pathlib.Path', 'Path', (['big_e_path'], {}), '(big_e_path)\n', (2070, 2082), False, 'from pathlib import Path\n'), ((2140, 2151), 'time.time', 'time.time', ([], {}), '()\n', (2149, 2151), False, 'import time\n'), ((3661, 3699), 'pandas.read_csv', 'pd.read_csv', (['csv_filename'], {'header': 'None'}), '(csv_filename, header=None)\n', (3672, 3699), True, 'import pandas as pd\n'), ((4882, 4920), 'pandas.read_csv', 'pd.read_csv', (['csv_filename'], {'header': 'None'}), '(csv_filename, header=None)\n', (4893, 4920), True, 'import pandas as pd\n'), ((5621, 5659), 'pandas.read_csv', 'pd.read_csv', (['csv_filename'], {'header': 'None'}), '(csv_filename, header=None)\n', (5632, 5659), True, 'import pandas as pd\n'), ((6666, 6704), 'pandas.read_csv', 'pd.read_csv', (['csv_filename'], {'header': 'None'}), '(csv_filename, header=None)\n', (6677, 6704), True, 'import pandas as pd\n'), ((2543, 2567), 'glob.glob', 'glob.glob', (["(d + '/*.json')"], {}), "(d + '/*.json')\n", (2552, 2567), False, 'import glob\n'), ((3007, 3020), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (3017, 3020), False, 'import csv\n'), ((4482, 4507), 'json.dump', 'json.dump', (['d', 'f'], {'indent': '(4)'}), '(d, f, indent=4)\n', (4491, 4507), False, 'import json\n'), ((3951, 3987), 'glob.iglob', 'glob.iglob', (["(path + '/*' + b + '.tif')"], {}), "(path + '/*' + b + '.tif')\n", (3961, 3987), False, 'import glob\n'), ((4203, 4226), 'numpy.quantile', 'np.quantile', (['imgs', '(0.02)'], {}), '(imgs, 0.02)\n', (4214, 4226), True, 'import numpy as np\n'), ((4253, 4276), 'numpy.quantile', 'np.quantile', (['imgs', '(0.98)'], {}), '(imgs, 0.98)\n', (4264, 4276), True, 'import numpy as np\n'), ((4956, 4966), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4964, 4966), False, 'from pathlib 
import Path\n'), ((5037, 5047), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (5045, 5047), False, 'from pathlib import Path\n'), ((5961, 5971), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (5969, 5971), False, 'from pathlib import Path\n'), ((6042, 6052), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (6050, 6052), False, 'from pathlib import Path\n'), ((2633, 2645), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2642, 2645), False, 'import json\n'), ((4015, 4057), 'cv2.imread', 'cv2.imread', (['filename', 'cv2.IMREAD_UNCHANGED'], {}), '(filename, cv2.IMREAD_UNCHANGED)\n', (4025, 4057), False, 'import cv2\n'), ((4114, 4136), 'numpy.stack', 'np.stack', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (4122, 4136), True, 'import numpy as np\n'), ((3099, 3110), 'time.time', 'time.time', ([], {}), '()\n', (3108, 3110), False, 'import time\n')] |
"""
test features_retriever
-----------------------
Testing the feature retriever.
"""
import numpy as np
from itertools import product
from pySpatialTools.FeatureManagement.features_retriever import\
FeaturesManager, _features_parsing_creation,\
_featuresmanager_parsing_creation
from pySpatialTools.FeatureManagement.features_objects import\
ImplicitFeatures, ExplicitFeatures, BaseFeatures,\
_featuresobject_parsing_creation
from pySpatialTools.FeatureManagement.Descriptors import DummyDescriptor
from pySpatialTools.FeatureManagement.Descriptors import AvgDescriptor
from pySpatialTools.utils.perturbations import PermutationPerturbation
from pySpatialTools.utils.artificial_data import\
categorical_agg_dict_features
from pySpatialTools.utils.neighs_info import Neighs_Info
from pySpatialTools.FeatureManagement.aux_resulter_building import\
DefaultResulter, GeneralResulter, default_creation_initializations,\
creation_concatenator_joiner, creation_null_joiner,\
creation_initialization_output_closearray,\
creation_initialization_output_list, creation_initialization_output_lists,\
creation_initialization_output_list_selfdriven,\
creation_initialization_desc_dict, creation_initialization_desc_array
def test():
###########################################################################
#### Testing auxiliar resulters
## Functions which helps to manage descriptors and build result measure
##
def test_resulter_functions(fm):
## Application of joiner functions
creation_concatenator_joiner()
creation_null_joiner()
## Creation function tests
default_creation_initializations(fm)
if all([e is not None for e in fm.shape_measure]):
creation_initialization_output_closearray(fm)
creation_initialization_output_list(fm)
creation_initialization_output_lists(fm)
creation_initialization_output_list_selfdriven(fm)
creation_initialization_desc_dict(fm)
creation_initialization_desc_array(fm)
## Creation of resulters tests
resulter = DefaultResulter(fm)
GeneralResulter(*resulter.get_functions())
## Definition parameters
n = 1000
rei = 10
n, n_feats = np.random.randint(10, 1000), np.random.randint(1, 20)
ks = np.random.randint(1, 20)
###########################################################################
##########################
#### FeatureRetriever testing
reindices0 = np.arange(n)
reindices = np.vstack([reindices0]+[np.random.permutation(n)
for i in range(rei-1)]).T
perturbation = PermutationPerturbation(reindices)
aggcatfeats_dict = categorical_agg_dict_features(n, n_feats, ks)
## Impossible instantiation cases
try:
# Not valid oject as a feature
boolean = False
fm = FeaturesManager(None, None)
boolean = True
raise Exception("It has to halt here.")
except:
if boolean:
raise Exception("It has to halt here.")
try:
# Object not valid as a feature
boolean = False
avgdesc = AvgDescriptor()
fm = FeaturesManager([], avgdesc)
boolean = True
raise Exception("It has to halt here.")
except:
if boolean:
raise Exception("It has to halt here.")
# try:
# boolean = False
# Feat_imp = ImplicitFeatures(contfeats_ar0, perturbation)
# fm = FeaturesManager(Feat_imp, None)
# boolean = True
# raise Exception("It has to halt here.")
# except:
# if boolean:
# raise Exception("It has to halt here.")
try:
# Different k_perturb
boolean = False
feats0 = ExplicitFeatures(np.random.random((100, 2, 4)))
feats1 = ExplicitFeatures(np.random.random((100, 3, 3)))
fm = FeaturesManager([feats0, feats1])
boolean = True
raise Exception("It has to halt here.")
except:
if boolean:
raise Exception("It has to halt here.")
try:
# Object not valid as a features
boolean = False
fm = FeaturesManager([5])
boolean = True
raise Exception("It has to halt here.")
except:
if boolean:
raise Exception("It has to halt here.")
try:
# Object not valid as a features
boolean = False
fm = FeaturesManager(lambda x: x)
boolean = True
raise Exception("It has to halt here.")
except:
if boolean:
raise Exception("It has to halt here.")
feats0 = np.random.random(100)
feats1 = np.random.random((100, 1))
feats2 = np.random.random((100, 1, 1))
feats3 = np.random.random((100, 2))
Feat_imp = ImplicitFeatures(feats1)
Feat_imp2 = ImplicitFeatures(feats3, names=[3, 4])
Feat_exp = ExplicitFeatures(aggcatfeats_dict)
avgdesc = AvgDescriptor()
pos_feats = [feats0, feats1, feats2, Feat_imp, Feat_exp,
[feats2, Feat_imp]]
pos_mapvals_i = [None, ('matrix', 100, 20)]#, lambda x: x, 'matrix']
pos_map_in = [None, lambda i_info, k: i_info]
pos_map_out = [None, lambda self, feats: feats]
pos_mode = [None, 'parallel', 'sequential']
pos_desc = [None, avgdesc]
possibilities = [pos_feats, pos_map_in, pos_map_out, pos_mapvals_i,
pos_mode, pos_desc]
## Random parameter space exploration
mapper0 = [None]*3
mapper1 = [(0, 0)]*3
mapper2 = [np.array([np.zeros(100), np.zeros(100)]).T]*3
mapper3 = [lambda idx: (0, 0)]*3
mapper4 = [(0, 0), (0, 0), (1, 0)]
pos_mappers = [mapper0, mapper1, mapper2, mapper3, mapper4]
## Information of indices
nei_i = Neighs_Info()
# nei_i.set(np.random.randint(0, 100, 5).reshape((5, 1, 1)))
nei_i.set(np.random.randint(0, 100))
nei_info = Neighs_Info()
nei_info.set(np.random.randint(0, 100, 20).reshape((5, 2, 2)))
## Combinations
for p in product(*possibilities):
# ## Random exploration of parameters
# feats = pos_feats[np.random.randint(0, len(pos_feats))]
# m_input = pos_map_in[np.random.randint(0, len(pos_map_in))]
# m_out = pos_map_out[np.random.randint(0, len(pos_map_out))]
# m_vals_i = pos_mapvals_i[np.random.randint(0, len(pos_mapvals_i))]
# mode = pos_mode[np.random.randint(0, len(pos_mode))]
# desc = pos_desc[np.random.randint(0, len(pos_desc))]
## Exhaustive exploration of parameters
i_selector = np.random.randint(0, len(pos_mappers))
selectors = pos_mappers[i_selector]
if i_selector == 4:
continue
#print i_selector
feats, m_input, m_out, m_vals_i, mode, desc = p
## Instantiation
fm = FeaturesManager(feats, maps_input=m_input, maps_output=m_out,
maps_vals_i=m_vals_i, mode=mode,
descriptormodels=desc, selectors=selectors)
## Basic parameters
i0, i1 = 0, range(4)
k_p = fm.k_perturb+1
nei_info0 = Neighs_Info()
nei_info1 = Neighs_Info()
neis = np.random.randint(0, 100, 8*k_p).reshape((k_p, 4, 2))
neis0 = np.random.randint(0, 100, 2*k_p).reshape((k_p, 1, 2))
nei_info0.set(neis0)
nei_info1.set(neis)
## Check basic functions
fm[0]
fm.shape
len(fm)
fm.set_map_vals_i(m_vals_i)
fm.initialization_desc()
fm.initialization_output()
fm.set_map_vals_i(100)
fm.set_map_vals_i(m_vals_i)
fm.set_descriptormodels(desc)
## Check basic functions
fm.get_type_feats(0)
fm.get_type_feats(50)
fm.get_type_feats(0, tuple([(0, 0)]*3))
fm.get_type_feats(50, tuple([(0, 0)]*3))
# fm.get_type_feats(i0)
# fm.get_type_feats(i1)
t_feat_in, t_feat_out, t_feat_des = fm.get_type_feats(50)
tf_in0, tf_out0, tf_desc0 = fm.get_type_feats(i0)
tf_in1, tf_out1, tf_desc1 = fm.get_type_feats(i1)
## Interaction with featuresObjects
# Input
fm._get_input_features(50, k=range(k_p), typefeats=t_feat_in)
if i_selector == 0:
fm._get_input_features([50], k=range(k_p), typefeats=[(0, 0)])
desc_i0 = fm._get_input_features(i0, k=range(k_p), typefeats=tf_in0)
desc_i1 = fm._get_input_features(i1, k=range(k_p), typefeats=tf_in1)
# print feats, len(desc_i0), k_p
# print fm._get_input_features, desc_i0, desc_i1
assert(len(desc_i0) == k_p)
assert(len(desc_i1) == k_p)
assert(len(desc_i0[0]) == 1)
assert(len(desc_i1[0]) == 4)
# Output
fm._get_output_features(range(10), k=range(k_p), typefeats=t_feat_out)
fm._get_output_features(neis[0], k=range(k_p), typefeats=t_feat_out)
fm._get_output_features(neis, k=range(k_p), typefeats=t_feat_out)
if i_selector == 0:
fm._get_output_features([50], k=range(k_p), typefeats=[(0, 0)])
desc_nei0 = fm._get_output_features(nei_info0, range(k_p), tf_out0)
desc_nei1 = fm._get_output_features(nei_info1, range(k_p), tf_out1)
# print fm._get_output_features
assert(len(desc_nei0) == k_p)
assert(len(desc_nei1) == k_p)
assert(len(desc_nei0[0]) == 1)
assert(len(desc_nei1[0]) == 4)
# print desc_i0, desc_i1, desc_nei
# print type(desc_i0), type(desc_i1), type(desc_nei)
# print len(desc_i0), len(desc_i1), len(desc_nei)
## Interaction with map_vals_i
fm._get_vals_i(20, range(k_p))
fm._get_vals_i(range(20), range(k_p))
vals_i0 = fm._get_vals_i(i0, range(k_p))
vals_i1 = fm._get_vals_i(i1, range(k_p))
# print fm._get_vals_i, vals_i0, vals_i1
assert(len(vals_i0) == k_p)
assert(len(vals_i1) == k_p)
assert(len(vals_i0[0]) == 1)
assert(len(vals_i1[0]) == 4)
## Completing features
fm._complete_desc_i(i0, nei_info0, desc_i0, desc_nei0, vals_i0,
tf_desc0)
fm._complete_desc_i(i1, nei_info1, desc_i1, desc_nei1, vals_i1,
tf_desc1)
fm._complete_desc_i(i0, nei_info0, desc_i0, desc_nei0, vals_i0,
(1, 0))
fm._complete_desc_i(i1, nei_info1, desc_i1, desc_nei1, vals_i1,
(1, 0))
## Computing altogether
fm.compute_descriptors(i0, nei_info0)
fm.compute_descriptors(i1, nei_info1)
fm.compute_descriptors(i0, nei_info0, range(k_p))
fm.compute_descriptors(i1, nei_info1, range(k_p))
# fm.compute_descriptors(i0, range(10), range(k_p))
# fm.compute_descriptors(i1, range(10), range(k_p))
fm.compute_descriptors(i0, neis0[0], range(k_p))
fm.compute_descriptors(i1, neis[0], range(k_p))
fm.compute_descriptors(i0, neis0, range(k_p))
fm.compute_descriptors(i1, neis, range(k_p))
if i_selector == 0:
fm.compute_descriptors([50], neis0[0], k=range(k_p),
feat_selectors=[tuple([(0, 0)]*3)])
# Strange cases
if mode is None:
FeaturesManager([ImplicitFeatures(feats1),
ImplicitFeatures(feats3, names=[3, 4])],
mode=mode)
## Test resulter functions
test_resulter_functions(fm)
## Cases
feats = [ImplicitFeatures(feats1), ImplicitFeatures(feats1)]
fm = FeaturesManager(feats, maps_input=m_input, maps_output=m_out,
maps_vals_i=m_vals_i, mode=mode,
descriptormodels=desc, selectors=selectors)
if all([fea.typefeat == 'implicit' for fea in fm.features]):
fm.add_perturbations(perturbation)
## Impossible function cases
feats = [ImplicitFeatures(feats1), ImplicitFeatures(feats3, names=[3, 4])]
try:
## Different variablesnames for sequential mode
boolean = False
fm = FeaturesManager(feats, maps_input=m_input, maps_output=m_out,
maps_vals_i=m_vals_i, mode='sequential',
descriptormodels=desc, selectors=selectors)
boolean = True
raise Exception("It has to halt here.")
except:
if boolean:
raise Exception("It has to halt here.")
try:
boolean = False
fm[-1]
boolean = True
raise Exception("It has to halt here.")
except:
if boolean:
raise Exception("It has to halt here.")
###########################################################################
#### Testing auxiliar parsing
## Functions which carry the uniformation of inputs from possible ways to
## input features information.
##
feats0 = np.random.randint(0, 10, 100)
feats1 = feats0.reshape((100, 1))
feats2 = np.random.random((100, 2, 3))
desc = DummyDescriptor()
pars_feats = {}
# Testing combinations of possible inputs
feats_info = feats0
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
feats_info = feats1
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
feats_info = feats2
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
feats_info = (feats0, pars_feats)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
feats_info = (feats1, pars_feats)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
feats_info = (feats2, pars_feats)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
feats_info = (feats0, pars_feats, desc)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
pars_feats = {}
feats_info = (feats1, pars_feats, desc)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
pars_feats = {}
feats_info = (feats2, pars_feats, desc)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
features_obj = _featuresobject_parsing_creation(feats_info)
assert(isinstance(features_obj, BaseFeatures))
features_ret = _features_parsing_creation(features_obj)
assert(isinstance(features_ret, FeaturesManager))
features_ret = _features_parsing_creation([features_obj])
assert(isinstance(features_ret, FeaturesManager))
features_obj = _featuresobject_parsing_creation(feats_info)
assert(isinstance(features_obj, BaseFeatures))
pars_feats = {}
feats_info = (features_obj, pars_feats)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
pars_feats = {}
feats_info = ([features_obj], pars_feats)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
pars_feats = {}
feats_info = (features_obj, pars_feats, desc)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
pars_feats = {}
feats_info = (features_obj, pars_feats, [desc, desc])
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
feats_info = ((feats0, {}), pars_feats)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
pars_feats = {}
feats_info = ((feats0, {}), pars_feats, desc)
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
pars_feats = {}
feats_info = ((feats0, {}), pars_feats, [desc, desc])
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
pars_feats = {}
feats_info = ((feats0, {}, desc), pars_feats, [desc, desc])
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
feats_info = features_ret
features_ret = _features_parsing_creation(feats_info)
assert(isinstance(features_ret, FeaturesManager))
features_ret = _featuresmanager_parsing_creation(features_obj)
assert(isinstance(features_ret, FeaturesManager))
features_ret = _featuresmanager_parsing_creation(features_ret)
assert(isinstance(features_ret, FeaturesManager))
feats_info = ((feats0, {}, desc), {})
features_ret = _featuresmanager_parsing_creation(features_ret)
assert(isinstance(features_ret, FeaturesManager))
feats_info = ((feats0, {}, desc), {}, [desc, desc])
features_ret = _featuresmanager_parsing_creation(features_ret)
assert(isinstance(features_ret, FeaturesManager))
| [
"pySpatialTools.FeatureManagement.features_objects.ExplicitFeatures",
"pySpatialTools.FeatureManagement.Descriptors.DummyDescriptor",
"pySpatialTools.FeatureManagement.aux_resulter_building.creation_initialization_output_list",
"pySpatialTools.FeatureManagement.aux_resulter_building.creation_initialization_ou... | [((2331, 2355), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (2348, 2355), True, 'import numpy as np\n'), ((2519, 2531), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2528, 2531), True, 'import numpy as np\n'), ((2682, 2716), 'pySpatialTools.utils.perturbations.PermutationPerturbation', 'PermutationPerturbation', (['reindices'], {}), '(reindices)\n', (2705, 2716), False, 'from pySpatialTools.utils.perturbations import PermutationPerturbation\n'), ((2741, 2786), 'pySpatialTools.utils.artificial_data.categorical_agg_dict_features', 'categorical_agg_dict_features', (['n', 'n_feats', 'ks'], {}), '(n, n_feats, ks)\n', (2770, 2786), False, 'from pySpatialTools.utils.artificial_data import categorical_agg_dict_features\n'), ((4647, 4668), 'numpy.random.random', 'np.random.random', (['(100)'], {}), '(100)\n', (4663, 4668), True, 'import numpy as np\n'), ((4682, 4708), 'numpy.random.random', 'np.random.random', (['(100, 1)'], {}), '((100, 1))\n', (4698, 4708), True, 'import numpy as np\n'), ((4722, 4751), 'numpy.random.random', 'np.random.random', (['(100, 1, 1)'], {}), '((100, 1, 1))\n', (4738, 4751), True, 'import numpy as np\n'), ((4765, 4791), 'numpy.random.random', 'np.random.random', (['(100, 2)'], {}), '((100, 2))\n', (4781, 4791), True, 'import numpy as np\n'), ((4807, 4831), 'pySpatialTools.FeatureManagement.features_objects.ImplicitFeatures', 'ImplicitFeatures', (['feats1'], {}), '(feats1)\n', (4823, 4831), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((4848, 4886), 'pySpatialTools.FeatureManagement.features_objects.ImplicitFeatures', 'ImplicitFeatures', (['feats3'], {'names': '[3, 4]'}), '(feats3, names=[3, 4])\n', (4864, 4886), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, 
ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((4902, 4936), 'pySpatialTools.FeatureManagement.features_objects.ExplicitFeatures', 'ExplicitFeatures', (['aggcatfeats_dict'], {}), '(aggcatfeats_dict)\n', (4918, 4936), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((4951, 4966), 'pySpatialTools.FeatureManagement.Descriptors.AvgDescriptor', 'AvgDescriptor', ([], {}), '()\n', (4964, 4966), False, 'from pySpatialTools.FeatureManagement.Descriptors import AvgDescriptor\n'), ((5769, 5782), 'pySpatialTools.utils.neighs_info.Neighs_Info', 'Neighs_Info', ([], {}), '()\n', (5780, 5782), False, 'from pySpatialTools.utils.neighs_info import Neighs_Info\n'), ((5903, 5916), 'pySpatialTools.utils.neighs_info.Neighs_Info', 'Neighs_Info', ([], {}), '()\n', (5914, 5916), False, 'from pySpatialTools.utils.neighs_info import Neighs_Info\n'), ((6018, 6041), 'itertools.product', 'product', (['*possibilities'], {}), '(*possibilities)\n', (6025, 6041), False, 'from itertools import product\n'), ((11567, 11710), 'pySpatialTools.FeatureManagement.features_retriever.FeaturesManager', 'FeaturesManager', (['feats'], {'maps_input': 'm_input', 'maps_output': 'm_out', 'maps_vals_i': 'm_vals_i', 'mode': 'mode', 'descriptormodels': 'desc', 'selectors': 'selectors'}), '(feats, maps_input=m_input, maps_output=m_out, maps_vals_i=\n m_vals_i, mode=mode, descriptormodels=desc, selectors=selectors)\n', (11582, 11710), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((12891, 12920), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (12908, 12920), True, 'import numpy as np\n'), ((12972, 13001), 'numpy.random.random', 'np.random.random', (['(100, 2, 3)'], {}), '((100, 2, 3))\n', (12988, 13001), True, 'import numpy as 
np\n'), ((13013, 13030), 'pySpatialTools.FeatureManagement.Descriptors.DummyDescriptor', 'DummyDescriptor', ([], {}), '()\n', (13028, 13030), False, 'from pySpatialTools.FeatureManagement.Descriptors import DummyDescriptor\n'), ((13141, 13179), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (13167, 13179), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((13277, 13315), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (13303, 13315), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((13413, 13451), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (13439, 13451), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((13563, 13601), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (13589, 13601), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((13713, 13751), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (13739, 13751), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((13863, 13901), 
'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (13889, 13901), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((14019, 14057), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (14045, 14057), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((14195, 14233), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (14221, 14233), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((14371, 14409), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (14397, 14409), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((14484, 14528), 'pySpatialTools.FeatureManagement.features_objects._featuresobject_parsing_creation', '_featuresobject_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (14516, 14528), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((14599, 14639), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['features_obj'], {}), '(features_obj)\n', (14625, 14639), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, 
_features_parsing_creation, _featuresmanager_parsing_creation\n'), ((14713, 14755), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['[features_obj]'], {}), '([features_obj])\n', (14739, 14755), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((14829, 14873), 'pySpatialTools.FeatureManagement.features_objects._featuresobject_parsing_creation', '_featuresobject_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (14861, 14873), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((15008, 15046), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (15034, 15046), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((15186, 15224), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (15212, 15224), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((15368, 15406), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (15394, 15406), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((15558, 15596), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (15584, 15596), False, 'from 
pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((15715, 15753), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (15741, 15753), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((15897, 15935), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (15923, 15935), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((16087, 16125), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (16113, 16125), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((16283, 16321), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (16309, 16321), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((16426, 16464), 'pySpatialTools.FeatureManagement.features_retriever._features_parsing_creation', '_features_parsing_creation', (['feats_info'], {}), '(feats_info)\n', (16452, 16464), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((16539, 16586), 'pySpatialTools.FeatureManagement.features_retriever._featuresmanager_parsing_creation', '_featuresmanager_parsing_creation', 
(['features_obj'], {}), '(features_obj)\n', (16572, 16586), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((16660, 16707), 'pySpatialTools.FeatureManagement.features_retriever._featuresmanager_parsing_creation', '_featuresmanager_parsing_creation', (['features_ret'], {}), '(features_ret)\n', (16693, 16707), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((16824, 16871), 'pySpatialTools.FeatureManagement.features_retriever._featuresmanager_parsing_creation', '_featuresmanager_parsing_creation', (['features_ret'], {}), '(features_ret)\n', (16857, 16871), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((17001, 17048), 'pySpatialTools.FeatureManagement.features_retriever._featuresmanager_parsing_creation', '_featuresmanager_parsing_creation', (['features_ret'], {}), '(features_ret)\n', (17034, 17048), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((1557, 1587), 'pySpatialTools.FeatureManagement.aux_resulter_building.creation_concatenator_joiner', 'creation_concatenator_joiner', ([], {}), '()\n', (1585, 1587), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((1596, 1618), 'pySpatialTools.FeatureManagement.aux_resulter_building.creation_null_joiner', 
'creation_null_joiner', ([], {}), '()\n', (1616, 1618), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((1662, 1698), 'pySpatialTools.FeatureManagement.aux_resulter_building.default_creation_initializations', 'default_creation_initializations', (['fm'], {}), '(fm)\n', (1694, 1698), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((1824, 1863), 'pySpatialTools.FeatureManagement.aux_resulter_building.creation_initialization_output_list', 'creation_initialization_output_list', (['fm'], {}), '(fm)\n', (1859, 1863), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((1872, 1912), 'pySpatialTools.FeatureManagement.aux_resulter_building.creation_initialization_output_lists', 'creation_initialization_output_lists', (['fm'], {}), '(fm)\n', (1908, 1912), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import 
DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((1921, 1971), 'pySpatialTools.FeatureManagement.aux_resulter_building.creation_initialization_output_list_selfdriven', 'creation_initialization_output_list_selfdriven', (['fm'], {}), '(fm)\n', (1967, 1971), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((1980, 2017), 'pySpatialTools.FeatureManagement.aux_resulter_building.creation_initialization_desc_dict', 'creation_initialization_desc_dict', (['fm'], {}), '(fm)\n', (2013, 2017), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((2026, 2064), 'pySpatialTools.FeatureManagement.aux_resulter_building.creation_initialization_desc_array', 'creation_initialization_desc_array', (['fm'], {}), '(fm)\n', (2060, 2064), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, 
creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((2123, 2142), 'pySpatialTools.FeatureManagement.aux_resulter_building.DefaultResulter', 'DefaultResulter', (['fm'], {}), '(fm)\n', (2138, 2142), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((2268, 2295), 'numpy.random.randint', 'np.random.randint', (['(10)', '(1000)'], {}), '(10, 1000)\n', (2285, 2295), True, 'import numpy as np\n'), ((2297, 2321), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (2314, 2321), True, 'import numpy as np\n'), ((2910, 2937), 'pySpatialTools.FeatureManagement.features_retriever.FeaturesManager', 'FeaturesManager', (['None', 'None'], {}), '(None, None)\n', (2925, 2937), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((3184, 3199), 'pySpatialTools.FeatureManagement.Descriptors.AvgDescriptor', 'AvgDescriptor', ([], {}), '()\n', (3197, 3199), False, 'from pySpatialTools.FeatureManagement.Descriptors import AvgDescriptor\n'), ((3213, 3241), 'pySpatialTools.FeatureManagement.features_retriever.FeaturesManager', 'FeaturesManager', (['[]', 'avgdesc'], {}), '([], avgdesc)\n', (3228, 3241), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((3910, 3943), 
'pySpatialTools.FeatureManagement.features_retriever.FeaturesManager', 'FeaturesManager', (['[feats0, feats1]'], {}), '([feats0, feats1])\n', (3925, 3943), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((4186, 4206), 'pySpatialTools.FeatureManagement.features_retriever.FeaturesManager', 'FeaturesManager', (['[5]'], {}), '([5])\n', (4201, 4206), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((4449, 4477), 'pySpatialTools.FeatureManagement.features_retriever.FeaturesManager', 'FeaturesManager', (['(lambda x: x)'], {}), '(lambda x: x)\n', (4464, 4477), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((5861, 5886), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (5878, 5886), True, 'import numpy as np\n'), ((6812, 6955), 'pySpatialTools.FeatureManagement.features_retriever.FeaturesManager', 'FeaturesManager', (['feats'], {'maps_input': 'm_input', 'maps_output': 'm_out', 'maps_vals_i': 'm_vals_i', 'mode': 'mode', 'descriptormodels': 'desc', 'selectors': 'selectors'}), '(feats, maps_input=m_input, maps_output=m_out, maps_vals_i=\n m_vals_i, mode=mode, descriptormodels=desc, selectors=selectors)\n', (6827, 6955), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((7115, 7128), 'pySpatialTools.utils.neighs_info.Neighs_Info', 'Neighs_Info', ([], {}), '()\n', (7126, 7128), False, 'from pySpatialTools.utils.neighs_info import Neighs_Info\n'), ((7149, 7162), 'pySpatialTools.utils.neighs_info.Neighs_Info', 'Neighs_Info', ([], {}), '()\n', (7160, 7162), False, 'from pySpatialTools.utils.neighs_info import 
Neighs_Info\n'), ((11506, 11530), 'pySpatialTools.FeatureManagement.features_objects.ImplicitFeatures', 'ImplicitFeatures', (['feats1'], {}), '(feats1)\n', (11522, 11530), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((11532, 11556), 'pySpatialTools.FeatureManagement.features_objects.ImplicitFeatures', 'ImplicitFeatures', (['feats1'], {}), '(feats1)\n', (11548, 11556), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((11911, 11935), 'pySpatialTools.FeatureManagement.features_objects.ImplicitFeatures', 'ImplicitFeatures', (['feats1'], {}), '(feats1)\n', (11927, 11935), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((11937, 11975), 'pySpatialTools.FeatureManagement.features_objects.ImplicitFeatures', 'ImplicitFeatures', (['feats3'], {'names': '[3, 4]'}), '(feats3, names=[3, 4])\n', (11953, 11975), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((12079, 12230), 'pySpatialTools.FeatureManagement.features_retriever.FeaturesManager', 'FeaturesManager', (['feats'], {'maps_input': 'm_input', 'maps_output': 'm_out', 'maps_vals_i': 'm_vals_i', 'mode': '"""sequential"""', 'descriptormodels': 'desc', 'selectors': 'selectors'}), "(feats, maps_input=m_input, maps_output=m_out, maps_vals_i=\n m_vals_i, mode='sequential', descriptormodels=desc, selectors=selectors)\n", (12094, 12230), False, 'from pySpatialTools.FeatureManagement.features_retriever import FeaturesManager, _features_parsing_creation, _featuresmanager_parsing_creation\n'), ((1770, 1815), 
'pySpatialTools.FeatureManagement.aux_resulter_building.creation_initialization_output_closearray', 'creation_initialization_output_closearray', (['fm'], {}), '(fm)\n', (1811, 1815), False, 'from pySpatialTools.FeatureManagement.aux_resulter_building import DefaultResulter, GeneralResulter, default_creation_initializations, creation_concatenator_joiner, creation_null_joiner, creation_initialization_output_closearray, creation_initialization_output_list, creation_initialization_output_lists, creation_initialization_output_list_selfdriven, creation_initialization_desc_dict, creation_initialization_desc_array\n'), ((3801, 3830), 'numpy.random.random', 'np.random.random', (['(100, 2, 4)'], {}), '((100, 2, 4))\n', (3817, 3830), True, 'import numpy as np\n'), ((3866, 3895), 'numpy.random.random', 'np.random.random', (['(100, 3, 3)'], {}), '((100, 3, 3))\n', (3882, 3895), True, 'import numpy as np\n'), ((5934, 5963), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(20)'], {}), '(0, 100, 20)\n', (5951, 5963), True, 'import numpy as np\n'), ((7178, 7212), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(8 * k_p)'], {}), '(0, 100, 8 * k_p)\n', (7195, 7212), True, 'import numpy as np\n'), ((7248, 7282), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(2 * k_p)'], {}), '(0, 100, 2 * k_p)\n', (7265, 7282), True, 'import numpy as np\n'), ((2572, 2596), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2593, 2596), True, 'import numpy as np\n'), ((11273, 11297), 'pySpatialTools.FeatureManagement.features_objects.ImplicitFeatures', 'ImplicitFeatures', (['feats1'], {}), '(feats1)\n', (11289, 11297), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((11328, 11366), 'pySpatialTools.FeatureManagement.features_objects.ImplicitFeatures', 'ImplicitFeatures', (['feats3'], {'names': '[3, 4]'}), '(feats3, 
names=[3, 4])\n', (11344, 11366), False, 'from pySpatialTools.FeatureManagement.features_objects import ImplicitFeatures, ExplicitFeatures, BaseFeatures, _featuresobject_parsing_creation\n'), ((5550, 5563), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (5558, 5563), True, 'import numpy as np\n'), ((5565, 5578), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (5573, 5578), True, 'import numpy as np\n')] |
import numpy as np
import os
# Dataset root and the output paths for the persisted train/test splits.
root = '/home/user4/database/BosphorusDB/'  # machine-specific; adjust per host
train_data = os.path.join(root, 'bosphorus_id_train.txt')
train_label = os.path.join(root, 'bosphorus_label_train.txt')
test_data = os.path.join(root, 'bosphorus_id_test.txt')
test_label = os.path.join(root, 'bosphorus_label_test.txt')
def _get_data_files(list_filename):
with open(list_filename) as f:
# return [line.rstrip()[5:] for line in f]
return np.array([line for line in f])
def _split_data(data, label, val=False):
# num_example = data.shape[0]
num_example = len(data)
arr = np.arange(num_example)
np.random.shuffle(arr)
data, label = (data[arr], label[arr])
if val:
ratio0, ratio1 =0.8, 0.9
s0 = np.int(num_example*ratio0)
s1 = np.int(num_example*ratio1)
# samples splited
x_train = data[:s0]
y_train = label[:s0]
x_val = data[s0:s1]
y_val = label[s0:s1]
x_test = data[s1:]
y_test = label[s1:]
return x_train, y_train, x_val, y_val, x_test, y_test
else:
ratio = 0.9
s = np.int(num_example*ratio)
x_train = data[:s]
y_train = label[:s]
x_test = data[s:]
y_test = label[s:]
return x_train, y_train, x_test, y_test
data_path = os.path.join(root, 'bosphorus_id.txt')
label_path = os.path.join(root, 'bosphorus_id_label.txt')

# Load the master id/label lists, shuffle-split them, and persist each part.
data = _get_data_files(data_path)
label = _get_data_files(label_path)
x_train, y_train, x_test, y_test = _split_data(data, label)

for out_path, lines in ((train_data, x_train), (train_label, y_train),
                        (test_data, x_test), (test_label, y_test)):
    with open(out_path, 'w') as out_file:
        out_file.writelines(lines)
| [
"numpy.array",
"numpy.arange",
"numpy.int",
"os.path.join",
"numpy.random.shuffle"
] | [((86, 130), 'os.path.join', 'os.path.join', (['root', '"""bosphorus_id_train.txt"""'], {}), "(root, 'bosphorus_id_train.txt')\n", (98, 130), False, 'import os\n'), ((145, 192), 'os.path.join', 'os.path.join', (['root', '"""bosphorus_label_train.txt"""'], {}), "(root, 'bosphorus_label_train.txt')\n", (157, 192), False, 'import os\n'), ((205, 248), 'os.path.join', 'os.path.join', (['root', '"""bosphorus_id_test.txt"""'], {}), "(root, 'bosphorus_id_test.txt')\n", (217, 248), False, 'import os\n'), ((262, 308), 'os.path.join', 'os.path.join', (['root', '"""bosphorus_label_test.txt"""'], {}), "(root, 'bosphorus_label_test.txt')\n", (274, 308), False, 'import os\n'), ((1306, 1344), 'os.path.join', 'os.path.join', (['root', '"""bosphorus_id.txt"""'], {}), "(root, 'bosphorus_id.txt')\n", (1318, 1344), False, 'import os\n'), ((1358, 1402), 'os.path.join', 'os.path.join', (['root', '"""bosphorus_id_label.txt"""'], {}), "(root, 'bosphorus_id_label.txt')\n", (1370, 1402), False, 'import os\n'), ((593, 615), 'numpy.arange', 'np.arange', (['num_example'], {}), '(num_example)\n', (602, 615), True, 'import numpy as np\n'), ((620, 642), 'numpy.random.shuffle', 'np.random.shuffle', (['arr'], {}), '(arr)\n', (637, 642), True, 'import numpy as np\n'), ((447, 477), 'numpy.array', 'np.array', (['[line for line in f]'], {}), '([line for line in f])\n', (455, 477), True, 'import numpy as np\n'), ((743, 771), 'numpy.int', 'np.int', (['(num_example * ratio0)'], {}), '(num_example * ratio0)\n', (749, 771), True, 'import numpy as np\n'), ((783, 811), 'numpy.int', 'np.int', (['(num_example * ratio1)'], {}), '(num_example * ratio1)\n', (789, 811), True, 'import numpy as np\n'), ((1109, 1136), 'numpy.int', 'np.int', (['(num_example * ratio)'], {}), '(num_example * ratio)\n', (1115, 1136), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 22 23:11:34 2021
@author: Ioana
"""
import numpy as np
import tensorflow as tf
from dipy.io.streamline import load_tractogram
## Load data and labels as per Jean-Bernard's/Juan Jose's code
# function to convert each streamline into a fibermap
def get_fibermap(all_trajs, n):
    """Convert each 3-D streamline into a (2n x 2n x 3) "fiber map" image.

    Every streamline in ``all_trajs`` is expected to contain exactly ``n``
    points (otherwise the slice assignment below fails to broadcast).  For
    each coordinate axis the point sequence ``s`` and its reversal ``r``
    form a 2-row cell [[s r], [r s]] that is tiled ``n`` times vertically.

    Returns an array of shape ``(len(all_trajs), 2n, 2n, 3)``.
    """
    maps = np.zeros((len(all_trajs), 2 * n, 2 * n, 3))
    for idx, traj in enumerate(all_trajs):
        for axis in range(3):
            seq = traj[:, axis]
            rev = seq[::-1]
            # Two complementary rows: [seq|rev] on top of [rev|seq].
            cell = np.vstack((np.concatenate((seq, rev), axis=0),
                               np.concatenate((rev, seq), axis=0)))
            # Repeat the 2-row cell n times -> one (2n, 2n) slice.
            maps[idx, :, :, axis] = np.tile(cell, (n, 1))
    return maps
# run function with streamline data
# NOTE(review): `streamlines` must be defined by earlier loading code (see the
# load_tractogram import above) and each streamline must contain exactly 20
# points for the 20-point fiber map to fit -- confirm the upstream loader.
map1 = get_fibermap(streamlines, 20)
# convert to tensor for CNN with labels
# NOTE(review): `all_labels` is likewise expected from earlier loading code.
dataset = tf.data.Dataset.from_tensor_slices((map1,all_labels)) # for CNN input
| [
"numpy.concatenate",
"numpy.zeros",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.tile",
"numpy.vstack"
] | [((1402, 1456), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(map1, all_labels)'], {}), '((map1, all_labels))\n', (1436, 1456), True, 'import tensorflow as tf\n'), ((361, 388), 'numpy.zeros', 'np.zeros', (['[2 * n, 2 * n, 3]'], {}), '([2 * n, 2 * n, 3])\n', (369, 388), True, 'import numpy as np\n'), ((789, 833), 'numpy.concatenate', 'np.concatenate', (['(stream, stream_rev)'], {'axis': '(0)'}), '((stream, stream_rev), axis=0)\n', (803, 833), True, 'import numpy as np\n'), ((873, 917), 'numpy.concatenate', 'np.concatenate', (['(stream_rev, stream)'], {'axis': '(0)'}), '((stream_rev, stream), axis=0)\n', (887, 917), True, 'import numpy as np\n'), ((942, 969), 'numpy.vstack', 'np.vstack', (['(block1, block2)'], {}), '((block1, block2))\n', (951, 969), True, 'import numpy as np\n'), ((1018, 1039), 'numpy.tile', 'np.tile', (['cell', '(n, 1)'], {}), '(cell, (n, 1))\n', (1025, 1039), True, 'import numpy as np\n')] |
from math import log
from ..ch3.models import LeastSquareModel, LinearModel
import numpy as np
from numpy import linalg as LA
from ..utils import lazy_method
from ..base import ClassificationMixin
# Public names exported by `from <module> import *`.  Some of these
# (e.g. BinaryLogisticRegression) are defined further down in the file.
__all__ = ['LinearRegressionIndicatorMatrix', 'LDAModel', 'QDAModel', 'RDAModel', 'LDAForComputation',
           'ReducedRankLDAModel', 'BinaryLogisticRegression']
class LinearRegression(LinearModel):
    """Linear-model base for K-class classification.

    Extends ``LinearModel`` with the number of target classes and a
    lazily-computed training-set error rate.
    """

    def __init__(self, *args, n_class, **kwargs):
        # Keyword-only: callers must name the class count explicitly.
        self.n_class = n_class
        super().__init__(*args, **kwargs)

    @property
    @lazy_method
    def error_rate(self):
        """Fraction of training samples whose prediction differs from truth."""
        n_correct = np.sum(self._raw_train_y == self.y_hat)
        return 1 - n_correct / self.N
class LinearRegressionIndicatorMatrix(ClassificationMixin, LeastSquareModel):
    """Classification via least-squares regression on an indicator matrix.

    Each class is encoded as an indicator column during training (handled
    by the mixin/base classes); prediction picks the column with the
    largest fitted value (ESL section 4.2).
    """
    # NOTE: removed ~15 lines of commented-out indicator-encoding code that
    # duplicated logic now living in the base classes.

    def predict(self, X):
        """Return hard class labels in ``{1, ..., K}`` for rows of ``X``."""
        Y_hat = super().predict(X)
        # argmax is 0-based while class labels start at 1.
        y = Y_hat.argmax(axis=1).reshape((-1, 1)) + 1
        return y

    @property
    @lazy_method
    def error_rate(self):
        """Fraction of misclassified training samples."""
        return 1 - np.sum(self._raw_train_y == self.y_hat) / self.N
class LDAModel(LinearRegression):
    """
    Linear Discriminant Analysis
    from page 106

    Fits one Gaussian per class with a single covariance matrix pooled
    across all classes; classification uses the linear discriminant
    functions delta_k of ESL eq. (4.10).
    """

    def _pre_processing_x(self, X):
        # Standardize features before fitting/predicting.
        X = self.standardize(X)
        return X

    def train(self):
        """Estimate class priors ``Pi``, class means ``Mu`` and the pooled covariance."""
        X = self.train_x
        y = self.train_y
        K = self.n_class
        p = self.p
        self.Mu = np.zeros((K, p))         # per-class mean vectors, (K, p)
        self.Pi = np.zeros((K, 1))         # class prior probabilities, (K, 1)
        self.Sigma_hat = np.zeros((p, p))  # pooled covariance estimate, (p, p)
        for k in range(K):
            # Class labels are 1-based: class k corresponds to label k+1.
            mask = (y == k+1)
            N_k = sum(mask)
            X_k = X[mask.flatten(), :]
            self.Pi[k] = N_k / self.N
            self.Mu[k] = np.sum(X_k, axis=0).reshape((1, -1)) / N_k
            # Pooled within-class scatter, normalized by N - K.
            self.Sigma_hat = self.Sigma_hat + ((X_k - self.Mu[k]).T @ (X_k - self.Mu[k])) / (self.N - K)

    def linear_discriminant_func(self, x, k):
        """
        linear discriminant function.
        Define by (4.10)
        :return: delta_k(x)
        """
        mu_k = self.Mu[k]
        pi_k = self.Pi[k]
        # NOTE(review): self.math.pinv presumably wraps a pseudo-inverse and is
        # recomputed on every call/class; could be cached -- confirm base class.
        sigma_inv = self.math.pinv(self.Sigma_hat)
        result = mu_k @ sigma_inv @ x.T - (mu_k @ sigma_inv @ mu_k.T)/2 + log(pi_k)
        return result

    def predict(self, X):
        """Return 1-based hard class labels for the rows of ``X``."""
        X = self._pre_processing_x(X)
        N = X.shape[0]
        Y = np.zeros((N, self.n_class))
        for k in range(self.n_class):
            # delta_k is (N x 1)
            delta_k = self.linear_discriminant_func(X, k)
            Y[:, k] = delta_k
        # make the k start from 1
        y_hat = Y.argmax(axis=1).reshape((-1, 1)) + 1
        return y_hat
class QDAModel(LinearRegression):
    """
    Quadratic Discriminant Analysis
    pp. 110
    Ref
    ---
    http://www.wikicoursenote.com/wiki/Stat841#In_practice
    """
    def train(self):
        """Estimate class priors, class means and one covariance per class."""
        X = self.train_x
        y = self.train_y
        K = self.n_class
        p = self.p
        self.Mu = np.zeros((K, p))
        self.Pi = np.zeros((K, 1))
        # one (p x p) covariance estimate per class, unlike LDA's pooled one
        self.Sigma_hat = []
        for k in range(K):
            mask = (y == k+1)
            N_k = sum(mask)
            X_k = X[mask.flatten(), :]
            self.Pi[k] = N_k / self.N
            self.Mu[k] = np.sum(X_k, axis=0).reshape((1, -1)) / N_k
            # We div by N_k instead of (N-K)
            self.Sigma_hat.append(((X_k - self.Mu[k]).T @ (X_k - self.Mu[k])) / N_k)
    def quadratic_discriminant_func(self, x, k):
        """Quadratic discriminant score for class ``k``.

        When ``x`` holds N observations as rows, the result is an (N x N)
        matrix whose diagonal carries the per-observation scores (see
        ``predict``).
        """
        mu_k = self.Mu[k]
        pi_k = self.Pi[k]
        sigma_k = self.Sigma_hat[k]
        pinv = self.math.pinv
        # assume that each row of x contain observation
        result = -(np.log(np.linalg.det(sigma_k)))/2 \
                 - ((x - mu_k) @ pinv(sigma_k, rcond=0) @ (x - mu_k).T)/2 + log(pi_k)
        return result
    def predict(self, X):
        """Return 1-based labels: argmax over the K quadratic scores."""
        X = self._pre_processing_x(X)
        N = X.shape[0]
        Y = np.zeros((N, self.n_class))
        for k in range(self.n_class):
            # the intuitive solution is use np.apply_along_axis, but is too slow
            # delta_k is (N x 1)
            # delta_k_func = partial(self.linear_discriminant_func, k=k)
            # delta_k = np.apply_along_axis(delta_k_func, 1, X)
            # d is NxN,
            # Let B = A@A.T, the diagonal of B is [A[i] @ A[i].T for i in range(A.shape(0)]
            d = self.quadratic_discriminant_func(X, k)
            Y[:, k] = d.diagonal()
        # make the k start from 1
        y_hat = Y.argmax(axis=1).reshape((-1, 1)) + 1
        return y_hat
class RDAModel(QDAModel):
    """Regularized Discriminant Analysis: each class covariance is shrunk
    towards the pooled covariance with weight ``alpha`` (alpha=1 keeps pure
    QDA covariances, alpha=0 uses only the pooled estimate)."""
    def __init__(self, *args, alpha=1, **kwargs):
        # shrinkage weight between per-class and pooled covariance
        self.alpha = alpha
        super().__init__(*args, **kwargs)
    def train(self):
        """Fit per-class covariances, then blend each with the pooled one."""
        X = self.train_x
        y = self.train_y
        K = self.n_class
        p = self.p
        self.Mu = np.zeros((K, p))
        self.Pi = np.zeros((K, 1))
        # list of sigma_k
        self.Sigma_hat = []
        # the sum of sigma_k
        # NOTE(review): initialized as (1, p), but adding the (p, p) scatter
        # matrices broadcasts it to (p, p) after the first iteration.
        self.Sigma_tot = np.zeros((1, p))
        for k in range(K):
            mask = (y == k+1)
            N_k = sum(mask)
            X_k = X[mask.flatten(), :]
            self.Pi[k] = N_k / self.N
            self.Mu[k] = np.sum(X_k, axis=0).reshape((1, -1)) / N_k
            # We div by N_k instead of (N-K)
            self.Sigma_hat.append(((X_k - self.Mu[k]).T @ (X_k - self.Mu[k])) / N_k)
            self.Sigma_tot = self.Sigma_tot + (X_k - self.Mu[k]).T @ (X_k - self.Mu[k])
        # pooled covariance, normalized by (N - K) as in LDA
        self.Sigma_tot = self.Sigma_tot / (self.N - K)
        for k in range(K):
            # convex combination of the class and pooled covariances
            self.Sigma_hat[k] = (self.Sigma_hat[k] * self.alpha) + self.Sigma_tot * (1 - self.alpha)
class LDAForComputation(LDAModel):
    """LDA arranged for cheap computation: the data is sphered with a matrix
    built from the eigendecomposition of the pooled covariance, after which
    classification is nearest prior-corrected centroid."""

    def train(self):
        super().train()
        # Eigendecompose the pooled covariance and build the sphering matrix
        # A = D^{-1/2} U^T (pinv guards against zero eigenvalues).
        eigvals, eigvecs = LA.eigh(self.Sigma_hat)
        diag = np.diagflat(eigvals)
        self.A = np.power(LA.pinv(diag), 0.5) @ eigvecs.T

    def predict(self, X):
        """Return 1-based labels of the closest transformed class centroid."""
        X = self._pre_processing_x(X)
        scores = np.zeros((X.shape[0], self.n_class))
        # Map the (N x p) data through A (the same map a subclass may have
        # installed into self.A).
        transformed = X @ self.A.T
        for k in range(self.n_class):
            centroid = self.A @ self.Mu[k]
            # Euclidean distance to the centroid, corrected by the log-prior.
            # Ref: http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html
            scores[:, k] = LA.norm(transformed - centroid, axis=1) * 0.5 - log(self.Pi[k])
        # argmin over distances; +1 turns the 0-based index into a label
        return scores.argmin(axis=1).reshape((-1, 1)) + 1
class ReducedRankLDAModel(LDAForComputation):
    """
    page 113, 4.3.3
    I also write a blog describe how to write RRLDA:
    http://littlezz.github.io/how-to-write-reduced-rank-linear-discriminant-analysis-with-python.html
    ref: http://sites.stat.psu.edu/~jiali/course/stat597e/notes2/lda2.pdf
    """
    def __init__(self, *args, L, **kwargs):
        # L: target dimensionality of the reduced discriminant subspace
        self.L = L
        super().__init__(*args, **kwargs)
    def train(self):
        """Fit LDA, then replace ``self.A`` with the rank-L projection built
        from the between-class scatter in the sphered space."""
        super().train()
        W = self.Sigma_hat
        # prior probabilities (K, 1)
        Pi = self.Pi
        # class centroids (K, p)
        Mu = self.Mu
        p = self.p
        # the number of class
        K = self.n_class
        # the dimension you want
        L = self.L
        # overall (prior-weighted) mean; Mu is (K, p), Pi is (K, 1)
        mu = np.sum(Pi * Mu, axis=0)
        # between-class scatter matrix
        B = np.zeros((p, p))
        for k in range(K):
            # vector @ vector equal scalar, use vector[:, None] to transform to matrix
            # vec[:, None] equal to vec.reshape((1, vec.shape[0]))
            B = B + Pi[k]*((Mu[k] - mu)[:, None] @ ((Mu[k] - mu)[None, :]))
        # Be careful: `eigh` returns eigenvalues in ascending order, which is
        # the opposite of R, hence the reversals below.
        Dw, Uw = LA.eigh(W)
        # reverse the Dw_ and Uw
        Dw = Dw[::-1]
        Uw = np.fliplr(Uw)
        W_half = self.math.pinv(np.diagflat(Dw**0.5) @ Uw.T)
        B_star = W_half.T @ B @ W_half
        D_, V = LA.eigh(B_star)
        # reverse V
        V = np.fliplr(V)
        # overwrite `self.A` so that we can reuse `predict` method define in parent class
        self.A = np.zeros((L, p))
        for l in range(L):
            self.A[l, :] = W_half @ V[:, l]
class BinaryLogisticRegression(LinearRegression):
    """
    page 119.
    two class case
    note that self.W is the second partial derivative.
    To get the same std.Err with book, you should set `do_standardization=False`
    ref: http://sites.stat.psu.edu/~jiali/course/stat597e/notes2/logit.pdf
    """
    def __init__(self, *args, max_iter=300, **kwargs):
        # Newton-Raphson iteration budget; see `train` below.
        self.max_iter = max_iter
        super().__init__(*args, **kwargs)
    def _pre_processing_x(self, X):
        X = self.standardize(X)
        # prepend an all-ones column so beta[0] acts as the intercept
        X = np.insert(X, 0, [1], axis=1)
        return X
    def train(self):
        """Fit beta by Newton-Raphson / IRLS (4.26 in ESL).

        NOTE(review): there is no convergence test -- the loop always runs
        ``max_iter`` iterations.
        """
        X = self.train_x
        y = self.train_y
        # include intercept
        beta = np.zeros((self.p+1, 1))
        iter_times = 0
        while True:
            e_X = np.exp(X @ beta)
            # N x 1, fitted probabilities
            self.P = e_X / (1 + e_X)
            # W is a vector (diagonal of the weight matrix)
            self.W = (self.P * (1 - self.P)).flatten()
            # X.T * W equal (X.T @ diagflat(W)).diagonal()
            beta = beta + self.math.pinv((X.T * self.W) @ X) @ X.T @ (y - self.P)
            iter_times += 1
            if iter_times > self.max_iter:
                break
        self.beta_hat = beta
    def predict(self, X):
        """Return hard 0/1 labels via the sign of the linear score."""
        X = self._pre_processing_x(X)
        y = X @ self.beta_hat
        y[y >= 0] = 1
        y[y < 0] = 0
        return y
    @property
    def std_err(self):
        """
        Standard errors of the coefficient estimates.
        ref: https://groups.google.com/d/msg/comp.soft-sys.stat.spss/Fv7Goxs_Bwk/ff0jCesG8REJ
        intuitive formula find in : http://data.princeton.edu/wws509/notes/c3.pdf page 10
        """
        return self.math.pinv(self.train_x.T * self.W @ self.train_x).diagonal() ** 0.5
| [
"numpy.sum",
"numpy.diagflat",
"numpy.zeros",
"numpy.insert",
"numpy.linalg.eigh",
"numpy.fliplr",
"numpy.linalg.norm",
"numpy.exp",
"numpy.linalg.det",
"math.log",
"numpy.linalg.pinv"
] | [((1845, 1861), 'numpy.zeros', 'np.zeros', (['(K, p)'], {}), '((K, p))\n', (1853, 1861), True, 'import numpy as np\n'), ((1880, 1896), 'numpy.zeros', 'np.zeros', (['(K, 1)'], {}), '((K, 1))\n', (1888, 1896), True, 'import numpy as np\n'), ((1922, 1938), 'numpy.zeros', 'np.zeros', (['(p, p)'], {}), '((p, p))\n', (1930, 1938), True, 'import numpy as np\n'), ((2748, 2775), 'numpy.zeros', 'np.zeros', (['(N, self.n_class)'], {}), '((N, self.n_class))\n', (2756, 2775), True, 'import numpy as np\n'), ((3357, 3373), 'numpy.zeros', 'np.zeros', (['(K, p)'], {}), '((K, p))\n', (3365, 3373), True, 'import numpy as np\n'), ((3392, 3408), 'numpy.zeros', 'np.zeros', (['(K, 1)'], {}), '((K, 1))\n', (3400, 3408), True, 'import numpy as np\n'), ((4287, 4314), 'numpy.zeros', 'np.zeros', (['(N, self.n_class)'], {}), '((N, self.n_class))\n', (4295, 4314), True, 'import numpy as np\n'), ((5205, 5221), 'numpy.zeros', 'np.zeros', (['(K, p)'], {}), '((K, p))\n', (5213, 5221), True, 'import numpy as np\n'), ((5240, 5256), 'numpy.zeros', 'np.zeros', (['(K, 1)'], {}), '((K, 1))\n', (5248, 5256), True, 'import numpy as np\n'), ((5365, 5381), 'numpy.zeros', 'np.zeros', (['(1, p)'], {}), '((1, p))\n', (5373, 5381), True, 'import numpy as np\n'), ((6147, 6161), 'numpy.linalg.eigh', 'LA.eigh', (['sigma'], {}), '(sigma)\n', (6154, 6161), True, 'from numpy import linalg as LA\n'), ((6174, 6189), 'numpy.diagflat', 'np.diagflat', (['D_'], {}), '(D_)\n', (6185, 6189), True, 'import numpy as np\n'), ((6316, 6352), 'numpy.zeros', 'np.zeros', (['(X.shape[0], self.n_class)'], {}), '((X.shape[0], self.n_class))\n', (6324, 6352), True, 'import numpy as np\n'), ((7815, 7838), 'numpy.sum', 'np.sum', (['(Pi * Mu)'], {'axis': '(0)'}), '(Pi * Mu, axis=0)\n', (7821, 7838), True, 'import numpy as np\n'), ((7851, 7867), 'numpy.zeros', 'np.zeros', (['(p, p)'], {}), '((p, p))\n', (7859, 7867), True, 'import numpy as np\n'), ((8243, 8253), 'numpy.linalg.eigh', 'LA.eigh', (['W'], {}), '(W)\n', (8250, 8253), True, 
'from numpy import linalg as LA\n'), ((8322, 8335), 'numpy.fliplr', 'np.fliplr', (['Uw'], {}), '(Uw)\n', (8331, 8335), True, 'import numpy as np\n'), ((8453, 8468), 'numpy.linalg.eigh', 'LA.eigh', (['B_star'], {}), '(B_star)\n', (8460, 8468), True, 'from numpy import linalg as LA\n'), ((8502, 8514), 'numpy.fliplr', 'np.fliplr', (['V'], {}), '(V)\n', (8511, 8514), True, 'import numpy as np\n'), ((8623, 8639), 'numpy.zeros', 'np.zeros', (['(L, p)'], {}), '((L, p))\n', (8631, 8639), True, 'import numpy as np\n'), ((9236, 9264), 'numpy.insert', 'np.insert', (['X', '(0)', '[1]'], {'axis': '(1)'}), '(X, 0, [1], axis=1)\n', (9245, 9264), True, 'import numpy as np\n'), ((9397, 9422), 'numpy.zeros', 'np.zeros', (['(self.p + 1, 1)'], {}), '((self.p + 1, 1))\n', (9405, 9422), True, 'import numpy as np\n'), ((2616, 2625), 'math.log', 'log', (['pi_k'], {}), '(pi_k)\n', (2619, 2625), False, 'from math import log\n'), ((4155, 4164), 'math.log', 'log', (['pi_k'], {}), '(pi_k)\n', (4158, 4164), False, 'from math import log\n'), ((9483, 9499), 'numpy.exp', 'np.exp', (['(X @ beta)'], {}), '(X @ beta)\n', (9489, 9499), True, 'import numpy as np\n'), ((603, 642), 'numpy.sum', 'np.sum', (['(self._raw_train_y == self.y_hat)'], {}), '(self._raw_train_y == self.y_hat)\n', (609, 642), True, 'import numpy as np\n'), ((1470, 1509), 'numpy.sum', 'np.sum', (['(self._raw_train_y == self.y_hat)'], {}), '(self._raw_train_y == self.y_hat)\n', (1476, 1509), True, 'import numpy as np\n'), ((6216, 6226), 'numpy.linalg.pinv', 'LA.pinv', (['D'], {}), '(D)\n', (6223, 6226), True, 'from numpy import linalg as LA\n'), ((6870, 6885), 'math.log', 'log', (['self.Pi[k]'], {}), '(self.Pi[k])\n', (6873, 6885), False, 'from math import log\n'), ((8369, 8391), 'numpy.diagflat', 'np.diagflat', (['(Dw ** 0.5)'], {}), '(Dw ** 0.5)\n', (8380, 8391), True, 'import numpy as np\n'), ((6826, 6861), 'numpy.linalg.norm', 'LA.norm', (['(X_star - mu_k_star)'], {'axis': '(1)'}), '(X_star - mu_k_star, axis=1)\n', (6833, 6861), 
True, 'from numpy import linalg as LA\n'), ((2129, 2148), 'numpy.sum', 'np.sum', (['X_k'], {'axis': '(0)'}), '(X_k, axis=0)\n', (2135, 2148), True, 'import numpy as np\n'), ((3626, 3645), 'numpy.sum', 'np.sum', (['X_k'], {'axis': '(0)'}), '(X_k, axis=0)\n', (3632, 3645), True, 'import numpy as np\n'), ((5571, 5590), 'numpy.sum', 'np.sum', (['X_k'], {'axis': '(0)'}), '(X_k, axis=0)\n', (5577, 5590), True, 'import numpy as np\n'), ((4050, 4072), 'numpy.linalg.det', 'np.linalg.det', (['sigma_k'], {}), '(sigma_k)\n', (4063, 4072), True, 'import numpy as np\n')] |
import h5py
import oneflow as flow
import shutil
import numpy as np
import os
def save_net(fname, net):
    """Write every entry of ``net``'s state dict into an HDF5 file.

    Each parameter becomes a dataset keyed by its state-dict name.
    """
    state = net.state_dict()
    with h5py.File(fname, "w") as h5f:
        for name, tensor in state.items():
            h5f.create_dataset(name, data=tensor.cpu().numpy())
def load_net(fname, net):
    """Populate ``net``'s parameters in place from an HDF5 file produced by
    ``save_net``."""
    with h5py.File(fname, "r") as h5f:
        for name, tensor in net.state_dict().items():
            loaded = flow.Tensor(np.asarray(h5f[name]))
            tensor.copy_(loaded)
def save_checkpoint(state, is_best, task_id, filename="checkpoints/"):
    """Persist the model for ``task_id`` and optionally snapshot it as best.

    :param state: dict holding at least ``state_dict`` (the model weights)
    :param is_best: when True, copy this checkpoint to the best-model path
    :param task_id: current task index as a *string* (it is concatenated to
        ``filename`` and also parsed with ``int`` to locate the previous task)
    :param filename: checkpoint directory prefix
    """
    # Remove the previous task's checkpoint before saving the new one.
    del_file(filename + str(int(task_id) - 1))
    flow.save(state["state_dict"], filename + task_id)
    if is_best:
        # NOTE(review): the best-model path is hard-coded and ignores the
        # ``filename`` argument -- confirm this is intended when a
        # non-default ``filename`` is passed.
        file_path = "checkpoints/model_best"
        del_file(file_path)
        shutil.copytree(filename + task_id, file_path)
def del_file(filepath):
    """
    Recursively delete ``filepath`` and everything inside it.

    No-op when the path does not exist.
    :param filepath: directory to remove
    :return: None
    """
    # shutil.rmtree already removes the whole tree recursively, so the
    # previous per-entry remove/rmtree loop was redundant (and could raise
    # on directory symlinks, which rmtree unlinks correctly as children).
    if os.path.exists(filepath):
        shutil.rmtree(filepath)
| [
"h5py.File",
"os.remove",
"shutil.rmtree",
"os.path.isdir",
"numpy.asarray",
"os.path.exists",
"oneflow.save",
"os.path.isfile",
"shutil.copytree",
"os.path.join",
"os.listdir"
] | [((564, 614), 'oneflow.save', 'flow.save', (["state['state_dict']", '(filename + task_id)'], {}), "(state['state_dict'], filename + task_id)\n", (573, 614), True, 'import oneflow as flow\n'), ((890, 914), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (904, 914), False, 'import os\n'), ((115, 136), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (124, 136), False, 'import h5py\n'), ((284, 305), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (293, 305), False, 'import h5py\n'), ((712, 758), 'shutil.copytree', 'shutil.copytree', (['(filename + task_id)', 'file_path'], {}), '(filename + task_id, file_path)\n', (727, 758), False, 'import shutil\n'), ((935, 955), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (945, 955), False, 'import os\n'), ((1204, 1227), 'shutil.rmtree', 'shutil.rmtree', (['filepath'], {}), '(filepath)\n', (1217, 1227), False, 'import shutil\n'), ((1007, 1032), 'os.path.join', 'os.path.join', (['filepath', 'f'], {}), '(filepath, f)\n', (1019, 1032), False, 'import os\n'), ((1048, 1073), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1062, 1073), False, 'import os\n'), ((392, 410), 'numpy.asarray', 'np.asarray', (['h5f[k]'], {}), '(h5f[k])\n', (402, 410), True, 'import numpy as np\n'), ((1091, 1111), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (1100, 1111), False, 'import os\n'), ((1129, 1153), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (1142, 1153), False, 'import os\n'), ((1171, 1195), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (1184, 1195), False, 'import shutil\n')] |
import numpy as np
from sklearn.neighbors import KNeighborsClassifier as SKKNeighborsClassifier
from skopt.space import Integer
from evalml.model_family import ModelFamily
from evalml.pipelines.components.estimators import Estimator
from evalml.problem_types import ProblemTypes
class KNeighborsClassifier(Estimator):
    """K-Nearest Neighbors classifier wrapping scikit-learn's implementation."""
    name = "KNN Classifier"
    hyperparameter_ranges = {
        "n_neighbors": Integer(2, 12),
        "weights": ["uniform", "distance"],
        "algorithm": ["auto", "ball_tree", "kd_tree", "brute"],
        "leaf_size": Integer(10, 30),
        "p": Integer(1, 5)
    }
    model_family = ModelFamily.K_NEIGHBORS
    supported_problem_types = [ProblemTypes.BINARY, ProblemTypes.MULTICLASS,
                               ProblemTypes.TIME_SERIES_BINARY, ProblemTypes.TIME_SERIES_MULTICLASS]

    def __init__(self,
                 n_neighbors=5,
                 weights="uniform",
                 algorithm="auto",
                 leaf_size=30,
                 p=2,
                 random_seed=0,
                 **kwargs):
        # Collect every sklearn-facing argument, letting extra kwargs
        # override or extend the defaults.
        parameters = {
            "n_neighbors": n_neighbors,
            "weights": weights,
            "algorithm": algorithm,
            "leaf_size": leaf_size,
            "p": p,
        }
        parameters.update(kwargs)
        super().__init__(parameters=parameters,
                         component_obj=SKKNeighborsClassifier(**parameters),
                         random_seed=random_seed)

    @property
    def feature_importance(self):
        """
        Returns array of 0's matching the input number of features as feature_importance is
        not defined for KNN classifiers.
        """
        return np.zeros(self._component_obj.n_features_in_)
| [
"numpy.zeros",
"skopt.space.Integer",
"sklearn.neighbors.KNeighborsClassifier"
] | [((454, 468), 'skopt.space.Integer', 'Integer', (['(2)', '(12)'], {}), '(2, 12)\n', (461, 468), False, 'from skopt.space import Integer\n'), ((599, 614), 'skopt.space.Integer', 'Integer', (['(10)', '(30)'], {}), '(10, 30)\n', (606, 614), False, 'from skopt.space import Integer\n'), ((629, 642), 'skopt.space.Integer', 'Integer', (['(1)', '(5)'], {}), '(1, 5)\n', (636, 642), False, 'from skopt.space import Integer\n'), ((1383, 1419), 'sklearn.neighbors.KNeighborsClassifier', 'SKKNeighborsClassifier', ([], {}), '(**parameters)\n', (1405, 1419), True, 'from sklearn.neighbors import KNeighborsClassifier as SKKNeighborsClassifier\n'), ((1852, 1874), 'numpy.zeros', 'np.zeros', (['num_features'], {}), '(num_features)\n', (1860, 1874), True, 'import numpy as np\n')] |
# Environment
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from scipy.stats import beta
class Env_Gaussian(object):
    '''
    A collection of functions to implement and visualize a Gaussian environment.
    Rewards are drawn from a Gaussian distribution with unit variance.
    '''
    def __init__(self, k=5, seed=None):
        '''
        Initialize reward distributions.

        :param k: number of arms/actions
        :param seed: optional RNG seed for reproducible arm centers
        '''
        self.k = k
        # `is not None` (identity check) is the Python idiom for optional
        # arguments; the original `seed != None` relied on __eq__.
        if seed is not None:
            np.random.seed(seed)
        self.arm_centers = np.random.normal(loc=0, scale=1, size=self.k)
        self.arm_widths = np.ones(self.k)
    def get_reward(self, last_action):
        '''
        Draw a reward for the corresponding action.
        '''
        self.last_reward = np.random.normal(loc=self.arm_centers[last_action],
                            scale=self.arm_widths[last_action], size=1)
        return self.last_reward
    def sample_env(self):
        '''
        Draw a number of samples from each environment. Mainly used for visualization.
        '''
        N_samples = 10000
        self.samples = np.empty([N_samples,self.k])
        for i, center, width in zip(range(self.k), self.arm_centers, self.arm_widths):
            self.samples[:,i] = np.random.normal(loc=center, scale=width, size=N_samples)
    def visualize_env(self):
        '''
        Visualize Gaussian environment using violin plots.
        '''
        self.sample_env()
        df = pd.DataFrame(self.samples)
        fig, ax = plt.subplots(1,1, figsize=(16,4))
        ax = sns.violinplot(data=df)
        labels = ['Action {}\n {:4.2f}'.format(i,v) for i,v in zip(range(1,self.k+1), self.arm_centers)]
        ax.set_xticklabels(labels,fontsize=13);
        ax.set_title("{}-Armed Bandit Gaussian Environemnt".format(self.k),fontsize=15);
class Env_Bernoulli(Env_Gaussian):
    '''
    A collection of functions to implement and visualize a Bernoulli environment.
    Rewards are drawn from a Bernoulli distribution, parametrized by success probability p_k.
    Inherits Env_Gaussian class.
    '''
    def __init__(self, k=3, seed=None, p=None):
        '''
        Initialize reward distributions. Inherits Env_Gaussian __init__().

        :param p: optional per-arm success probabilities; drawn uniformly
            from [0, 0.2) when omitted
        '''
        super(Env_Bernoulli,self).__init__(k=k, seed=seed)
        # BUGFIX: the original `if p==None:` breaks when `p` is a numpy
        # array (elementwise comparison -> ambiguous truth value); optional
        # arguments must be tested with `is None`.
        if p is None:
            p = np.random.uniform(0,0.2, size=k)
        self.arm_centers = p
    def get_reward(self, last_action):
        '''
        Draw a reward for the corresponding action.
        '''
        self.last_reward = np.random.binomial(1,self.arm_centers[last_action],1)
        return self.last_reward
    def sample_env(self):
        '''
        Draw a number of samples from each environment. Mainly used for visualization.
        '''
        N_samples = 100
        self.samples = np.empty([N_samples,self.k])
        for i, p in zip(range(self.k), self.arm_centers):
            self.samples[:,i] = [np.mean(np.random.binomial(1, p ,1000)) for ii in range(N_samples)]
    def visualize_env(self):
        '''
        Visualize Bernoulli environment using violin plots.
        This plots the distribution of estimated success probabilities given a series of experiments, each involving 100 samples.
        '''
        self.sample_env()
        N_beta_samples = 10000
        beta_samples = np.empty([N_beta_samples,self.k])
        for i, sample in enumerate(np.transpose(self.samples)):
            a = np.sum(sample)
            b = len(sample) - a
            beta_samples[:,i] = [np.random.beta(a,b) for _ in range(N_beta_samples)]
        df = pd.DataFrame(beta_samples)
        fig, ax = plt.subplots(1,1, figsize=(16,3))
        ax = sns.violinplot(data=df)
        labels = ['Action {}\n{:0.2f}'.format(i,v) for i,v in zip(range(1,self.k+1),self.arm_centers)]
        ax.set_xticklabels(labels,fontsize=13);
        ax.set_title("{}-Armed Bandit Bernoulli Environment".format(self.k),fontsize=18);
| [
"pandas.DataFrame",
"numpy.random.uniform",
"numpy.random.binomial",
"numpy.random.seed",
"numpy.sum",
"numpy.random.beta",
"numpy.empty",
"numpy.transpose",
"numpy.ones",
"numpy.random.normal",
"matplotlib.pyplot.subplots",
"seaborn.violinplot"
] | [((548, 593), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1)', 'size': 'self.k'}), '(loc=0, scale=1, size=self.k)\n', (564, 593), True, 'import numpy as np\n'), ((621, 636), 'numpy.ones', 'np.ones', (['self.k'], {}), '(self.k)\n', (628, 636), True, 'import numpy as np\n'), ((780, 880), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'self.arm_centers[last_action]', 'scale': 'self.arm_widths[last_action]', 'size': '(1)'}), '(loc=self.arm_centers[last_action], scale=self.arm_widths[\n last_action], size=1)\n', (796, 880), True, 'import numpy as np\n'), ((1152, 1181), 'numpy.empty', 'np.empty', (['[N_samples, self.k]'], {}), '([N_samples, self.k])\n', (1160, 1181), True, 'import numpy as np\n'), ((1530, 1556), 'pandas.DataFrame', 'pd.DataFrame', (['self.samples'], {}), '(self.samples)\n', (1542, 1556), True, 'import pandas as pd\n'), ((1575, 1610), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(16, 4)'}), '(1, 1, figsize=(16, 4))\n', (1587, 1610), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1645), 'seaborn.violinplot', 'sns.violinplot', ([], {'data': 'df'}), '(data=df)\n', (1636, 1645), True, 'import seaborn as sns\n'), ((2626, 2681), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'self.arm_centers[last_action]', '(1)'], {}), '(1, self.arm_centers[last_action], 1)\n', (2644, 2681), True, 'import numpy as np\n'), ((2905, 2934), 'numpy.empty', 'np.empty', (['[N_samples, self.k]'], {}), '([N_samples, self.k])\n', (2913, 2934), True, 'import numpy as np\n'), ((3425, 3459), 'numpy.empty', 'np.empty', (['[N_beta_samples, self.k]'], {}), '([N_beta_samples, self.k])\n', (3433, 3459), True, 'import numpy as np\n'), ((3697, 3723), 'pandas.DataFrame', 'pd.DataFrame', (['beta_samples'], {}), '(beta_samples)\n', (3709, 3723), True, 'import pandas as pd\n'), ((3750, 3785), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(16, 3)'}), '(1, 1, figsize=(16, 3))\n', (3762, 
3785), True, 'import matplotlib.pyplot as plt\n'), ((3797, 3820), 'seaborn.violinplot', 'sns.violinplot', ([], {'data': 'df'}), '(data=df)\n', (3811, 3820), True, 'import seaborn as sns\n'), ((500, 520), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (514, 520), True, 'import numpy as np\n'), ((1300, 1357), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'center', 'scale': 'width', 'size': 'N_samples'}), '(loc=center, scale=width, size=N_samples)\n', (1316, 1357), True, 'import numpy as np\n'), ((2409, 2442), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(0.2)'], {'size': 'k'}), '(0, 0.2, size=k)\n', (2426, 2442), True, 'import numpy as np\n'), ((3495, 3521), 'numpy.transpose', 'np.transpose', (['self.samples'], {}), '(self.samples)\n', (3507, 3521), True, 'import numpy as np\n'), ((3552, 3566), 'numpy.sum', 'np.sum', (['sample'], {}), '(sample)\n', (3558, 3566), True, 'import numpy as np\n'), ((3632, 3652), 'numpy.random.beta', 'np.random.beta', (['a', 'b'], {}), '(a, b)\n', (3646, 3652), True, 'import numpy as np\n'), ((3033, 3063), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p', '(1000)'], {}), '(1, p, 1000)\n', (3051, 3063), True, 'import numpy as np\n')] |
"""
Validating IOU of a trained model.
Configurations
scannet_dir: root dir of scannet
device: for saving
model_name: trained DNN
data_type: Random, Grid, Hierarchy
specify_id: if want to valid specific ids
n_scene: how many scenes to validate
Results saved to ../Result/{device}/{model_name}/{data_type}/result_main.csv.
Format: keep ratio; average number of points; mean IOU; average running time (s); FLOPs (M); memory (M)
"""
import torch
import torch.nn as nn
import numpy as np
import glob
import math
import sparseconvnet as scn
import iou
import torch.utils.data
import multiprocessing as mp
import os
import time
import sys
import psutil
# ------ Configurations ------
scannet_dir = "/home/dtc/Backup/Data/ScanNet"
device = "alienware"
# trained model in ../Model/
model_name = "scannet_m32_rep2_residualTrue-000000670.pth"
# Random, Grid, Hierarchy
data_type = "Grid"
specify_id = []  # if want to valid specific ids
use_cuda = True
#!!!!!!!!!!!
# n_scene = 50
# --- end of configurations ---
# Model Options: hyper-parameters are recovered by parsing the checkpoint
# filename, e.g. "scannet_m32_rep2_residualTrue-000000670.pth"
# -> m=32, block_reps=2, residual_blocks=True.
extract_model_options = model_name.split("-")[0]
extract_model_options = extract_model_options.split("_")
m = int(extract_model_options[1][1:])
block_reps = int(extract_model_options[2][3:])
residual_blocks = extract_model_options[3][8:]
if residual_blocks == "True":
    residual_blocks = True
elif residual_blocks == "False":
    residual_blocks = False
else:
    sys.exit("Unknown residual blocks")
# m = 16 # 16 or 32; 16
# residual_blocks = True # True or False; False
# block_reps = 2 # Conv block repetition factor: 1 or 2; 1
dimension = 3
# voxelization parameters: coordinates are multiplied by `scale` and must
# fit inside a cube of side `full_scale`
scale = 100
full_scale = 4096
offset_filename = "valOffsets.npy"
result_filename = "data.npy"
# globals filled per data_id by valid_data()
val = []
valOffsets = []
valLabels = []
# load model
class Model(nn.Module):
    """Sparse 3D U-Net backbone followed by a linear classifier over the
    20 ScanNet semantic classes."""

    def __init__(self):
        nn.Module.__init__(self)
        # Build the sparse backbone with successive .add calls; each call's
        # return value is chained exactly as in the original one-liner.
        backbone = scn.Sequential()
        backbone = backbone.add(scn.InputLayer(dimension, full_scale, mode=4))
        backbone = backbone.add(scn.SubmanifoldConvolution(dimension, 3, m, 3, False))
        backbone = backbone.add(scn.UNet(dimension, block_reps,
                                         [m, 2 * m, 3 * m, 4 * m, 5 * m, 6 * m, 7 * m],
                                         residual_blocks))
        backbone = backbone.add(scn.BatchNormReLU(m))
        backbone = backbone.add(scn.OutputLayer(dimension))
        self.sparseModel = backbone
        self.linear = nn.Linear(m, 20)

    def forward(self, x):
        features = self.sparseModel(x)
        return self.linear(features)
print(" --- loading model ---", model_name)
model_file = os.path.join("../Model", model_name)
unet = Model()
# restore trained weights before (optionally) moving to GPU
unet.load_state_dict(torch.load(model_file))
if use_cuda:
    unet.cuda()
# results go to ../Result/<device>/<model_name_without_ext>/<data_type>
save_dir = os.path.join("../Result", device, os.path.splitext(model_name)[0], data_type)
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
def coords_transform(physical_val):
    """Scale point coordinates into the ``[0, full_scale)`` voxel grid and
    drop points that fall outside it.

    :param physical_val: tuple ``(coords, features, labels)`` as stored in
        the .pth scene files
    :return: filtered ``(coords, features, labels)``
    """
    a, b, c = physical_val
    # Uniform scaling; the rotation below is currently disabled (theta is
    # fixed at 0) but kept so random augmentation can be re-enabled.
    m = np.eye(3) * scale
    # theta = np.random.rand()*2*math.pi
    theta = 0
    m = np.matmul(m, [[math.cos(theta), math.sin(theta), 0],
                   [-math.sin(theta), math.cos(theta), 0],
                   [0, 0, 1]])
    a = np.matmul(a, m) + full_scale / 2
    # Shift so the minimum corner sits at the origin. (The unused M/q
    # bounding-box computation from the original was removed.)
    a += -a.min(0)
    # Keep only points inside the voxel grid.
    idxs = (a.min(1) >= 0) * (a.max(1) < full_scale)
    return a[idxs], b[idxs], c[idxs]
def valMerge(tbl):
    """Collate function for the validation DataLoader.

    For each scene index in ``tbl``, voxelizes the points (same transform as
    ``coords_transform``), tags each point with its batch index, and
    concatenates everything into one sparse batch. ``point_ids`` maps each
    kept point back to its global offset (via ``valOffsets``) so predictions
    can be accumulated per point.
    """
    locs = []
    feats = []
    labels = []
    point_ids = []
    for idx, i in enumerate(tbl):
        a, b, c = val[i]
        m = np.eye(3)
        m *= scale
        # theta = np.random.rand()*2*math.pi
        theta = 0
        m = np.matmul(m, [[math.cos(theta), math.sin(theta), 0], [-math.sin(theta), math.cos(theta), 0], [0, 0, 1]])
        a = np.matmul(a, m)+full_scale/2
        m = a.min(0)
        M = a.max(0)
        q = M-m
        # shift the minimum corner to the origin
        offset = -m
        a += offset
        # keep only points that land inside the voxel grid
        idxs = (a.min(1) >= 0)*(a.max(1) < full_scale)
        a = a[idxs]
        b = b[idxs]
        c = c[idxs]
        a = torch.from_numpy(a).long()
        # append the batch index as a 4th coordinate column
        locs.append(torch.cat([a, torch.LongTensor(a.shape[0], 1).fill_(idx)], 1))
        feats.append(torch.from_numpy(b))
        labels.append(torch.from_numpy(c))
        point_ids.append(torch.from_numpy(np.nonzero(idxs)[0]+valOffsets[i]))
    locs = torch.cat(locs, 0)
    feats = torch.cat(feats, 0)
    labels = torch.cat(labels, 0)
    point_ids = torch.cat(point_ids, 0)
    return {'x': [locs, feats], 'y': labels.long(), 'id': tbl, 'point_ids': point_ids}
def valid_data(data_id):
    """Validate the trained model on the scenes stored under
    ``<scannet_dir>/Pth/<data_type>/<data_id>`` and report statistics.

    :param data_id: subdirectory name (e.g. keep ratio) of the data variant
    :return: tuple ``(data_id, avg points per scene, mean IOU in %,
        avg inference time per scene (s), avg MegaMulAdd per scene,
        avg resident memory per scene (MB))``
    """
    start_time = time.time()
    process = psutil.Process(os.getpid())
    ret_memory = 0
    ret_time = 0
    data_name = data_type + "/" + str(data_id)
    ret_data_id = data_id
    data_dir = os.path.join(scannet_dir, "Pth", data_name)
    # load val data
    print("loading val data", data_name)
    global val
    val = []
    # NOTE(review): the original `if "n_scene" in locals() ...` branch had a
    # body byte-identical to the else branch (its scene subsetting had been
    # removed), so the duplicate was merged into a single loop.
    for x in torch.utils.data.DataLoader(
            glob.glob(os.path.join(data_dir, "*.pth")),
            collate_fn=lambda x: torch.load(x[0]), num_workers=mp.cpu_count()):
        val.append(x)
    print("data from {} scenes".format(len(val)))
    global valOffsets
    global valLabels
    valOffsets = [0]
    valLabels = []
    for idx, x in enumerate(val):
        valOffsets.append(valOffsets[-1] + x[2].size)
        valLabels.append(x[2].astype(np.int32))
    valLabels = np.hstack(valLabels)
    val_data_loader = torch.utils.data.DataLoader(
        list(range(len(val))), batch_size=1, collate_fn=valMerge, num_workers=20, shuffle=False)
    print("calculating accuracy ")
    with torch.no_grad():
        unet.eval()
        # accumulate per-point class scores over the whole validation set
        store = torch.zeros(valOffsets[-1], 20)
        scn.forward_pass_multiplyAdd_count = 0
        scn.forward_pass_hidden_states = 0
        start = time.time()
        for i, batch in enumerate(val_data_loader):
            if use_cuda:
                batch['x'][1] = batch['x'][1].cuda()
                batch['y'] = batch['y'].cuda()
            start_time_ret = time.time()
            predictions = unet(batch['x'])
            store.index_add_(0, batch['point_ids'], predictions.cpu())
            ret_memory += process.memory_info().rss / 1e6
            ret_time += time.time() - start_time_ret
        # The summary print and IOU evaluation were hoisted out of the loop:
        # they previously re-ran on the partially filled `store` after every
        # batch, and only the final pass mattered.
        ret_muladd = scn.forward_pass_multiplyAdd_count / 1e6
        print('Val MegaMulAdd=', scn.forward_pass_multiplyAdd_count / len(val) / 1e6, 'MegaHidden',
              scn.forward_pass_hidden_states / len(val) / 1e6, 'time=', time.time() - start, 's',
              "Memory (M)=", ret_memory / len(val))
        ret_iou = iou.evaluate(store.max(1)[1].numpy(), valLabels)
    print("Time for data_id {}: {:.2f} s".format(data_id, time.time() - start_time))
    return ret_data_id, len(valLabels)/len(val), 100*ret_iou, ret_time/len(val), ret_muladd/len(val), ret_memory/len(val)
if __name__ == "__main__":
    result = []
    # sort key: data-variant directories are named by integer id
    def func_filename(x):
        return int(os.path.basename(x))
    if specify_id:
        # validate only the explicitly requested ids
        for my_id in specify_id:
            result.append(valid_data(my_id))
    else:
        # validate every data variant found on disk, in ascending id order
        data_dirs = sorted(glob.glob(os.path.join(scannet_dir, "Pth", data_type, "*")), key=func_filename)
        for data_dir in data_dirs:
            my_id = int(os.path.basename(data_dir))
            result.append(valid_data(my_id))
    result_vstack = np.vstack(result)
    print("id, avg num of points, mean iou, avg time (s), avg_flop(M), memory(M)")
    print(np.array_str(result_vstack, precision=2, suppress_small=True))
    save_file = os.path.join(save_dir, "result_main.csv")
    print("saving file to:", save_file)
    np.savetxt(save_file, result, fmt="%d,%.2f,%.2f,%.2f,%.2f,%.2f",
               header="data_id,avg_num_points,mean_iou,avg_time(s),avg_addmul(M),memory(M)")
| [
"sparseconvnet.BatchNormReLU",
"numpy.array_str",
"torch.cat",
"torch.no_grad",
"os.path.join",
"multiprocessing.cpu_count",
"sparseconvnet.Sequential",
"torch.load",
"numpy.savetxt",
"os.path.exists",
"sparseconvnet.SubmanifoldConvolution",
"sparseconvnet.OutputLayer",
"math.cos",
"torch.... | [((2452, 2488), 'os.path.join', 'os.path.join', (['"""../Model"""', 'model_name'], {}), "('../Model', model_name)\n", (2464, 2488), False, 'import os\n'), ((2525, 2547), 'torch.load', 'torch.load', (['model_file'], {}), '(model_file)\n', (2535, 2547), False, 'import torch\n'), ((2675, 2699), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (2689, 2699), False, 'import os\n'), ((2705, 2726), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (2716, 2726), False, 'import os\n'), ((2800, 2809), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2806, 2809), True, 'import numpy as np\n'), ((4149, 4167), 'torch.cat', 'torch.cat', (['locs', '(0)'], {}), '(locs, 0)\n', (4158, 4167), False, 'import torch\n'), ((4180, 4199), 'torch.cat', 'torch.cat', (['feats', '(0)'], {}), '(feats, 0)\n', (4189, 4199), False, 'import torch\n'), ((4213, 4233), 'torch.cat', 'torch.cat', (['labels', '(0)'], {}), '(labels, 0)\n', (4222, 4233), False, 'import torch\n'), ((4250, 4273), 'torch.cat', 'torch.cat', (['point_ids', '(0)'], {}), '(point_ids, 0)\n', (4259, 4273), False, 'import torch\n'), ((4445, 4456), 'time.time', 'time.time', ([], {}), '()\n', (4454, 4456), False, 'import time\n'), ((4627, 4670), 'os.path.join', 'os.path.join', (['scannet_dir', '"""Pth"""', 'data_name'], {}), "(scannet_dir, 'Pth', data_name)\n", (4639, 4670), False, 'import os\n'), ((5543, 5563), 'numpy.hstack', 'np.hstack', (['valLabels'], {}), '(valLabels)\n', (5552, 5563), True, 'import numpy as np\n'), ((7528, 7545), 'numpy.vstack', 'np.vstack', (['result'], {}), '(result)\n', (7537, 7545), True, 'import numpy as np\n'), ((7719, 7760), 'os.path.join', 'os.path.join', (['save_dir', '"""result_main.csv"""'], {}), "(save_dir, 'result_main.csv')\n", (7731, 7760), False, 'import os\n'), ((7805, 7952), 'numpy.savetxt', 'np.savetxt', (['save_file', 'result'], {'fmt': '"""%d,%.2f,%.2f,%.2f,%.2f,%.2f"""', 'header': 
'"""data_id,avg_num_points,mean_iou,avg_time(s),avg_addmul(M),memory(M)"""'}), "(save_file, result, fmt='%d,%.2f,%.2f,%.2f,%.2f,%.2f', header=\n 'data_id,avg_num_points,mean_iou,avg_time(s),avg_addmul(M),memory(M)')\n", (7815, 7952), True, 'import numpy as np\n'), ((1468, 1503), 'sys.exit', 'sys.exit', (['"""Unknown residual blocks"""'], {}), "('Unknown residual blocks')\n", (1476, 1503), False, 'import sys\n'), ((1861, 1885), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (1879, 1885), True, 'import torch.nn as nn\n'), ((2273, 2289), 'torch.nn.Linear', 'nn.Linear', (['m', '(20)'], {}), '(m, 20)\n', (2282, 2289), True, 'import torch.nn as nn\n'), ((2624, 2652), 'os.path.splitext', 'os.path.splitext', (['model_name'], {}), '(model_name)\n', (2640, 2652), False, 'import os\n'), ((3001, 3016), 'numpy.matmul', 'np.matmul', (['a', 'm'], {}), '(a, m)\n', (3010, 3016), True, 'import numpy as np\n'), ((3390, 3399), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3396, 3399), True, 'import numpy as np\n'), ((4487, 4498), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4496, 4498), False, 'import os\n'), ((5758, 5773), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5771, 5773), False, 'import torch\n'), ((5811, 5842), 'torch.zeros', 'torch.zeros', (['valOffsets[-1]', '(20)'], {}), '(valOffsets[-1], 20)\n', (5822, 5842), False, 'import torch\n'), ((5949, 5960), 'time.time', 'time.time', ([], {}), '()\n', (5958, 5960), False, 'import time\n'), ((7639, 7700), 'numpy.array_str', 'np.array_str', (['result_vstack'], {'precision': '(2)', 'suppress_small': '(True)'}), '(result_vstack, precision=2, suppress_small=True)\n', (7651, 7700), True, 'import numpy as np\n'), ((2223, 2249), 'sparseconvnet.OutputLayer', 'scn.OutputLayer', (['dimension'], {}), '(dimension)\n', (2238, 2249), True, 'import sparseconvnet as scn\n'), ((3611, 3626), 'numpy.matmul', 'np.matmul', (['a', 'm'], {}), '(a, m)\n', (3620, 3626), True, 'import numpy as np\n'), ((3996, 4015), 
'torch.from_numpy', 'torch.from_numpy', (['b'], {}), '(b)\n', (4012, 4015), False, 'import torch\n'), ((4039, 4058), 'torch.from_numpy', 'torch.from_numpy', (['c'], {}), '(c)\n', (4055, 4058), False, 'import torch\n'), ((6214, 6225), 'time.time', 'time.time', ([], {}), '()\n', (6223, 6225), False, 'import time\n'), ((7139, 7158), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (7155, 7158), False, 'import os\n'), ((2903, 2918), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (2911, 2918), False, 'import math\n'), ((2920, 2935), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (2928, 2935), False, 'import math\n'), ((2960, 2975), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (2968, 2975), False, 'import math\n'), ((3865, 3884), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (3881, 3884), False, 'import torch\n'), ((4889, 4920), 'os.path.join', 'os.path.join', (['data_dir', '"""*.pth"""'], {}), "(data_dir, '*.pth')\n", (4901, 4920), False, 'import os\n'), ((4990, 5004), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (5002, 5004), True, 'import multiprocessing as mp\n'), ((5115, 5146), 'os.path.join', 'os.path.join', (['data_dir', '"""*.pth"""'], {}), "(data_dir, '*.pth')\n", (5127, 5146), False, 'import os\n'), ((5216, 5230), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (5228, 5230), True, 'import multiprocessing as mp\n'), ((6422, 6433), 'time.time', 'time.time', ([], {}), '()\n', (6431, 6433), False, 'import time\n'), ((6686, 6697), 'time.time', 'time.time', ([], {}), '()\n', (6695, 6697), False, 'import time\n'), ((7305, 7353), 'os.path.join', 'os.path.join', (['scannet_dir', '"""Pth"""', 'data_type', '"""*"""'], {}), "(scannet_dir, 'Pth', data_type, '*')\n", (7317, 7353), False, 'import os\n'), ((7434, 7460), 'os.path.basename', 'os.path.basename', (['data_dir'], {}), '(data_dir)\n', (7450, 7460), False, 'import os\n'), ((2185, 2205), 'sparseconvnet.BatchNormReLU', 
'scn.BatchNormReLU', (['m'], {}), '(m)\n', (2202, 2205), True, 'import sparseconvnet as scn\n'), ((2943, 2958), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (2951, 2958), False, 'import math\n'), ((3509, 3524), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (3517, 3524), False, 'import math\n'), ((3526, 3541), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (3534, 3541), False, 'import math\n'), ((3566, 3581), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (3574, 3581), False, 'import math\n'), ((4960, 4976), 'torch.load', 'torch.load', (['x[0]'], {}), '(x[0])\n', (4970, 4976), False, 'import torch\n'), ((5186, 5202), 'torch.load', 'torch.load', (['x[0]'], {}), '(x[0])\n', (5196, 5202), False, 'import torch\n'), ((6894, 6905), 'time.time', 'time.time', ([], {}), '()\n', (6903, 6905), False, 'import time\n'), ((3549, 3564), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (3557, 3564), False, 'import math\n'), ((4102, 4118), 'numpy.nonzero', 'np.nonzero', (['idxs'], {}), '(idxs)\n', (4112, 4118), True, 'import numpy as np\n'), ((2084, 2183), 'sparseconvnet.UNet', 'scn.UNet', (['dimension', 'block_reps', '[m, 2 * m, 3 * m, 4 * m, 5 * m, 6 * m, 7 * m]', 'residual_blocks'], {}), '(dimension, block_reps, [m, 2 * m, 3 * m, 4 * m, 5 * m, 6 * m, 7 *\n m], residual_blocks)\n', (2092, 2183), True, 'import sparseconvnet as scn\n'), ((3926, 3957), 'torch.LongTensor', 'torch.LongTensor', (['a.shape[0]', '(1)'], {}), '(a.shape[0], 1)\n', (3942, 3957), False, 'import torch\n'), ((2009, 2062), 'sparseconvnet.SubmanifoldConvolution', 'scn.SubmanifoldConvolution', (['dimension', '(3)', 'm', '(3)', '(False)'], {}), '(dimension, 3, m, 3, False)\n', (2035, 2062), True, 'import sparseconvnet as scn\n'), ((1946, 1991), 'sparseconvnet.InputLayer', 'scn.InputLayer', (['dimension', 'full_scale'], {'mode': '(4)'}), '(dimension, full_scale, mode=4)\n', (1960, 1991), True, 'import sparseconvnet as scn\n'), ((1913, 1929), 'sparseconvnet.Sequential', 
'scn.Sequential', ([], {}), '()\n', (1927, 1929), True, 'import sparseconvnet as scn\n')] |
import os
import sys
import gym
import numpy as np
sys.path.append(os.getcwd())
from data.helpers import dodict
from data.trainer import Trainer
from game.game import Game
from data.replay_buffer import ReplayBuffer
# Importing Agents
from data.agent import BaseAgent
from agents.random_agent import RandomAgent
from agents.tor_dqn import DQNAgent
from agents.tor_reinforce import RFAgent
from agents.tor_adv_ac import ACAgent
import pdb
# Training a Reinforce Agent.
# Default agent network: two conv layers followed by two dense layers.
network_dims = dodict({
    'clayers': 2,
    'cl_dims': [3, 6, 12],
    'nlayers': 2,
    'nl_dims': [128, 128],
})
agent_network = dodict({'network_dims': network_dims})
# Full run configuration: environment, training schedule, agent and logging.
config = dodict({
    # Environment
    'env': "LunarLander-v2",
    # Training Control
    'epochs': 5000,
    'episodes': 1,
    'train_steps': 1,
    'update_eps': 1,
    'training': True,
    'save_replay': False,
    'save_checkpoint': False,
    'rollout_steps': 3000,
    # Agent Control
    'agent_type': "REINFORCE",
    'agent_class': ACAgent,
    'load_model': False,
    'lr': 0.0001,
    'gamma': 0.90,
    # NOTE(review): key is spelled 'epislon' in the original config; kept
    # as-is in case downstream code reads this exact spelling — confirm.
    'epislon': 0.95,
    'epsilon_dec': 0.99,
    'epsilon_update': 10,
    'agent_network': agent_network,
    'buffer_size': 1000,
    'batch_size': 64,
    # Log Control
    'msg': "message",
    'notes': "reinforce agent",
    'project_name': "gym-benchmarks",
    'wandb': True,
    'wandb_mode': "online",
    'wandb_run_name': "reinforce",
    'log_level': 10,
    'log_file': "logs/reinforce.log",
})
class train_gym(Trainer):
    """Trainer wiring a single agent to an OpenAI Gym environment.

    Builds the agent declared in ``config.agent_class`` and drives the
    collect / train / log loop for ``config.epochs`` epochs.
    """

    def __init__(self, config, env, **env_specs):
        """
        Args:
            config: dodict with run settings (epochs, training flags, ...).
            env: the (already created) gym environment.
            **env_specs: must contain ``input_dims``, ``output_dims``
                and ``action_space``.
        """
        super().__init__(config)
        self.config = config
        self.env = env
        self.input_dims = env_specs["input_dims"]
        self.output_dims = env_specs["output_dims"]
        self.action_space = env_specs["action_space"]
        # Initialize the agent and keep its initial state as a checkpoint.
        self.agent = self.initialize_agents()
        self.checkpnt_state = self.agent.save_state()

    def train(self):
        """Run the training loop, logging a running average every 100 epochs."""
        rewards_hist = []
        steps_hist = []
        for epoch in range(self.config.epochs):
            loss = 0
            # Collect experience for this epoch.
            steps, rewards, epsilon = self.run_episodes()
            # Train on the collected experience.
            if self.config.training:
                loss = self.run_training()
            if (epoch % self.config.update_eps) == 0:
                self.agent.update_eps()
            # Any agent-specific update goes here.
            rewards_hist.append(rewards)
            steps_hist.append(steps)
            if self.config.save_replay:
                pass
            if self.config.save_checkpoint:
                pass
            if ((epoch + 1) % 100) == 0:
                # Fix: average over the full 100-epoch window (the original
                # sliced [-99:], silently dropping one epoch).
                info = dict(
                    steps=np.mean(steps_hist[-100:]),
                    rewards=np.mean(rewards_hist[-100:]))
                self.update_logs(epoch, info=info)
                print(f"Epochs:{epoch:4} | Steps:{info['steps']:4.2f} | Rewards:{info['rewards']:4.2f}")

    def initialize_agents(self):
        """Build and return the agent named by ``config.agent_class``.

        Returns:
            A BaseAgent subclass instance; random agents get no replay
            memory, every other agent receives a ReplayBuffer sized from
            the config.
        """
        if self.config.agent_type == "random":
            # Random agents never learn, so skip allocating a buffer.
            return RandomAgent(
                str(self.config.agent_class),
                self.input_dims,
                self.output_dims,
                self.action_space)
        memory = ReplayBuffer(
            self.config.buffer_size,
            self.config.batch_size,
            self.input_dims)
        agent = self.config.agent_class(
            str(self.config.agent_class),
            self.input_dims,
            self.output_dims,
            self.action_space,
            memory=memory,
            **self.config)
        assert isinstance(agent, BaseAgent), "Agent not of class BaseAgent"
        return agent

    def run_episodes(self):
        """Play ``config.episodes`` episodes to completion.

        Returns:
            (step_hist, reward_hist, epsilon): per-episode step counts and
            total rewards; ``epsilon`` is currently always 0 (kept for
            interface compatibility with callers unpacking three values).
        """
        step_hist = []
        reward_hist = []
        epsilon = 0
        for ep in range(self.config.episodes):
            observation = self.env.reset()
            done = False
            steps = 0
            total_reward = 0
            while not done:
                action, probs = self.agent.get_action(observation)
                next_, reward, done, info = self.env.step(action)
                # Store the full transition (incl. action probabilities)
                # for the on-policy update.
                self.agent.store_transition(observation,
                        action,
                        reward,
                        next_,
                        done,
                        probs)
                total_reward += reward
                observation = next_
                steps += 1
            step_hist.append(steps)
            reward_hist.append(total_reward)
        return step_hist, reward_hist, epsilon

    def run_training(self):
        """Run ``config.train_steps`` gradient steps; return per-step losses."""
        loss_hist = []
        for _ in range(self.config.train_steps):
            loss_hist.append(self.agent.train_on_batch())
        return loss_hist

    def save_replay(self):
        """Not implemented yet (``config.save_replay`` is False by default)."""
        pass

    def load_checkpoint(self):
        """Not implemented yet."""
        pass

    def save_checkpoint(self):
        """Not implemented yet."""
        pass
if __name__ == "__main__":
    # Create the Environment object. Abort if creation fails: the
    # original fell through after printing and then crashed later with a
    # NameError on the undefined ``env``.
    try:
        env = gym.make(config.env)
    except Exception as e:
        print(e)
        sys.exit(f"Gym Environment:{config.env} could not be created!")
    input_dims = env.observation_space.shape
    output_dims = env.action_space.n
    action_space = [i for i in range(env.action_space.n)]
    trainer = train_gym(config,
            env,
            input_dims=input_dims,
            output_dims=output_dims,
            action_space=action_space)
    trainer.train()
    trainer.shut_logger()
| [
"os.getcwd",
"numpy.mean",
"gym.make",
"data.replay_buffer.ReplayBuffer"
] | [((67, 78), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (76, 78), False, 'import os\n'), ((3174, 3252), 'data.replay_buffer.ReplayBuffer', 'ReplayBuffer', (['self.config.buffer_size', 'self.config.batch_size', 'self.input_dims'], {}), '(self.config.buffer_size, self.config.batch_size, self.input_dims)\n', (3186, 3252), False, 'from data.replay_buffer import ReplayBuffer\n'), ((5262, 5282), 'gym.make', 'gym.make', (['config.env'], {}), '(config.env)\n', (5270, 5282), False, 'import gym\n'), ((2877, 2902), 'numpy.mean', 'np.mean', (['steps_hist[-99:]'], {}), '(steps_hist[-99:])\n', (2884, 2902), True, 'import numpy as np\n'), ((2938, 2965), 'numpy.mean', 'np.mean', (['rewards_hist[-99:]'], {}), '(rewards_hist[-99:])\n', (2945, 2965), True, 'import numpy as np\n')] |
# ==============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
from cntk import transpose
def test_transpose():
    """
    Test for cntk.transpose() against the NumPy reference.

    Runs several rounds of random permutations over tensors of rank
    2..5 with random dimension sizes and checks that the CNTK result
    matches numpy.transpose.
    :return: Nothing
    """
    repeat_for = 5
    for _ in range(repeat_for):
        for i in range(1, 5):
            permutation = np.random.permutation(i + 1)
            permutation = [int(p) for p in permutation]
            shape = [np.random.randint(2, 5) for _ in range(i + 1)]
            # np.prod replaces np.product, which was removed in NumPy 2.0.
            entries = np.prod(shape)
            data = np.arange(entries)
            data.shape = shape
            np_transposed = np.transpose(np.copy(data), np.copy(permutation))
            by_transposeCNTK = transpose(np.ascontiguousarray(data), permutation).eval()
            # np.all replaces np.alltrue, which was removed in NumPy 2.0.
            assert np.all(np_transposed == by_transposeCNTK)


if __name__ == "__main__":
    test_transpose()
| [
"numpy.copy",
"numpy.product",
"numpy.random.randint",
"numpy.arange",
"numpy.alltrue",
"numpy.random.permutation",
"numpy.ascontiguousarray"
] | [((561, 589), 'numpy.random.permutation', 'np.random.permutation', (['(i + 1)'], {}), '(i + 1)\n', (582, 589), True, 'import numpy as np\n'), ((737, 754), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (747, 754), True, 'import numpy as np\n'), ((775, 793), 'numpy.arange', 'np.arange', (['entries'], {}), '(entries)\n', (784, 793), True, 'import numpy as np\n'), ((1013, 1058), 'numpy.alltrue', 'np.alltrue', (['(np_transposed == by_transposeCNTK)'], {}), '(np_transposed == by_transposeCNTK)\n', (1023, 1058), True, 'import numpy as np\n'), ((668, 691), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (685, 691), True, 'import numpy as np\n'), ((867, 880), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (874, 880), True, 'import numpy as np\n'), ((882, 902), 'numpy.copy', 'np.copy', (['permutation'], {}), '(permutation)\n', (889, 902), True, 'import numpy as np\n'), ((945, 971), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data'], {}), '(data)\n', (965, 971), True, 'import numpy as np\n')] |
"""
@author: <NAME>
__license__= "LGPL"
"""
import numpy as np
import easyvvuq as uq
import os
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.ticker import ScalarFormatter, NullFormatter
plt.rcParams.update({'font.size': 18, 'legend.fontsize': 15})
plt.rcParams['figure.figsize'] = 12,9
"""
*************
* Load data *
*************
"""
workdir = '/export/scratch1/federica/VirsimCampaigns'#'/tmp'
# home directory of this file
HOME = os.path.abspath(os.path.dirname(__file__))


def _reload_campaign(state_file, with_analysis=False):
    """Reload a saved EasyVVUQ campaign from ``workdir`` and collate it.

    Parameters
    ----------
    state_file : str
        File name of the campaign state JSON inside ``workdir``.
    with_analysis : bool, optional
        When True, also build a ``QMCAnalysis`` from the campaign's
        active sampler and its decoder output columns.

    Returns
    -------
    tuple
        ``(campaign, analysis, data)`` where ``analysis`` is None unless
        ``with_analysis`` is True.
    """
    campaign = uq.Campaign(state_file=state_file, work_dir=workdir)
    print('========================================================')
    print('Reloaded campaign', campaign.campaign_dir.split('/')[-1])
    print('========================================================')
    analysis = None
    if with_analysis:
        analysis = uq.analysis.QMCAnalysis(
            sampler=campaign._active_sampler,
            qoi_cols=campaign._active_app_decoder.output_columns)
    # collate output and fetch the full dataset of data
    campaign.collate()
    data = campaign.get_collation_result()
    return campaign, analysis, data


# Campaigns WITHOUT biology-related uncertainties (seed + policy only).
FC_campaign, FC_qmc_analysis, FC_data = _reload_campaign(
    "campaign_state_FC_MC2k.json", with_analysis=True)
FC_sampler = FC_campaign._active_sampler
FC_output_columns = FC_campaign._active_app_decoder.output_columns

CT_campaign, CT_qmc_analysis, CT_data = _reload_campaign(
    "campaign_state_CT_MC2k_newdistr.json", with_analysis=True)
CT_sampler = CT_campaign._active_sampler
CT_output_columns = CT_campaign._active_app_decoder.output_columns

IL_campaign, IL_qmc_analysis, IL_data = _reload_campaign(
    "campaign_state_IL_nobio_MC2k.json", with_analysis=True)
IL_sampler = IL_campaign._active_sampler
IL_output_columns = IL_campaign._active_app_decoder.output_columns

PO_campaign, PO_qmc_analysis, PO_data = _reload_campaign(
    "campaign_state_PO_MC2k.json", with_analysis=True)
PO_sampler = PO_campaign._active_sampler
PO_output_columns = PO_campaign._active_app_decoder.output_columns

###############################################################################################
# Campaigns WITH biology-related uncertainties (no analysis object needed).
FC_bio_campaign, _, FC_bio_data = _reload_campaign("campaign_state_FC_bio_cdf_2k.json")
CT_bio_campaign, _, CT_bio_data = _reload_campaign("campaign_state_CT_bio_cdf_2k.json")
IL_bio_campaign, _, IL_bio_data = _reload_campaign("campaign_state_IL_bio_cdf_2k.json")
IL_bio_sampler = IL_bio_campaign._active_sampler
PO_bio_campaign, _, PO_bio_data = _reload_campaign("campaign_state_PO_bio_cdf_2k.json")
PO_bio_sampler = PO_bio_campaign._active_sampler
#####################################################################################################
"""
*************************
* Empirical CDF of QoIs *
*************************
"""
L = 551            # time steps per run (kept for reference)
IC_capacity = 109  # IC capacity per million capita
n_runs = 2000      # Monte Carlo sample size per campaign
# Dvoretzky-Kiefer-Wolfowitz confidence-band half-width at level alpha.
alpha_DKW = 0.05
eps_DKW = np.sqrt(np.log(2 / alpha_DKW) / (2 * n_runs))

# Without biology: pull the per-run sample values out of the QMC analyses
# (the second and third return values are not needed here).
FC_IC_prev_avg_max, _, _ = FC_qmc_analysis._separate_output_values(FC_data.IC_prev_avg_max, 3, n_runs)
FC_IC_ex_max, _, _ = FC_qmc_analysis._separate_output_values(FC_data.IC_ex_max, 3, n_runs)
CT_IC_prev_avg_max, _, _ = CT_qmc_analysis._separate_output_values(CT_data.IC_prev_avg_max, 4, n_runs)
CT_IC_ex_max, _, _ = CT_qmc_analysis._separate_output_values(CT_data.IC_ex_max, 4, n_runs)
IL_IC_prev_avg_max, _, _ = IL_qmc_analysis._separate_output_values(IL_data.IC_prev_avg_max, 5, n_runs)
IL_IC_ex_max, _, _ = IL_qmc_analysis._separate_output_values(IL_data.IC_ex_max, 5, n_runs)
PO_IC_prev_avg_max, _, _ = PO_qmc_analysis._separate_output_values(PO_data.IC_prev_avg_max, 4, n_runs)
PO_IC_ex_max, _, _ = PO_qmc_analysis._separate_output_values(PO_data.IC_ex_max, 4, n_runs)

# With biology: the collated data already holds one value per run.
FC_IC_prev_avg_max_bio = FC_bio_data.IC_prev_avg_max.to_numpy()
FC_IC_ex_max_bio = FC_bio_data.IC_ex_max.to_numpy()
CT_IC_prev_avg_max_bio = CT_bio_data.IC_prev_avg_max.to_numpy()
CT_IC_ex_max_bio = CT_bio_data.IC_ex_max.to_numpy()
IL_IC_prev_avg_max_bio = IL_bio_data.IC_prev_avg_max.to_numpy()
IL_IC_ex_max_bio = IL_bio_data.IC_ex_max.to_numpy()
PO_IC_prev_avg_max_bio = PO_bio_data.IC_prev_avg_max.to_numpy()
PO_IC_ex_max_bio = PO_bio_data.IC_ex_max.to_numpy()

# Sort every QoI sample so the step plots below trace the empirical CDFs.
FC_IC_prev_avg_max = np.sort(FC_IC_prev_avg_max, axis=None)
FC_IC_ex_max = np.sort(FC_IC_ex_max, axis=None)
CT_IC_prev_avg_max = np.sort(CT_IC_prev_avg_max, axis=None)
CT_IC_ex_max = np.sort(CT_IC_ex_max, axis=None)
IL_IC_prev_avg_max = np.sort(IL_IC_prev_avg_max, axis=None)
IL_IC_ex_max = np.sort(IL_IC_ex_max, axis=None)
PO_IC_prev_avg_max = np.sort(PO_IC_prev_avg_max, axis=None)
PO_IC_ex_max = np.sort(PO_IC_ex_max, axis=None)

FC_IC_prev_avg_max_bio = np.sort(FC_IC_prev_avg_max_bio, axis=None)
FC_IC_ex_max_bio = np.sort(FC_IC_ex_max_bio, axis=None)
CT_IC_prev_avg_max_bio = np.sort(CT_IC_prev_avg_max_bio, axis=None)
CT_IC_ex_max_bio = np.sort(CT_IC_ex_max_bio, axis=None)
IL_IC_prev_avg_max_bio = np.sort(IL_IC_prev_avg_max_bio, axis=None)
IL_IC_ex_max_bio = np.sort(IL_IC_ex_max_bio, axis=None)
PO_IC_prev_avg_max_bio = np.sort(PO_IC_prev_avg_max_bio, axis=None)
PO_IC_ex_max_bio = np.sort(PO_IC_ex_max_bio, axis=None)

# Plotting positions k/n for k = 1..n.
p = np.arange(start=1, stop=n_runs + 1, step=1) / n_runs
"""
********
* Plot *
********
"""
f = plt.figure('cdfs',figsize=[12,7])
ax_p_bio = f.add_subplot(221, ylabel='All uncertainties \n \n Probability')
# with biology
ax_p_bio.step(FC_IC_prev_avg_max_bio,p,lw=2,color='orchid',label='FC')
ax_p_bio.step(FC_IC_prev_avg_max_bio,p+eps_DKW,lw=1,color='plum',ls='--')
ax_p_bio.step(FC_IC_prev_avg_max_bio,p-eps_DKW,lw=1,color='plum',ls='--')
#
ax_p_bio.step(CT_IC_prev_avg_max_bio,p,lw=2,color='cornflowerblue',label='CT')
ax_p_bio.step(CT_IC_prev_avg_max_bio,p+eps_DKW,lw=1,color='lightskyblue',ls='--')
ax_p_bio.step(CT_IC_prev_avg_max_bio,p-eps_DKW,lw=1,color='lightskyblue',ls='--')
#
ax_p_bio.step(IL_IC_prev_avg_max_bio,p,lw=2,color='salmon',label='IL')
ax_p_bio.step(IL_IC_prev_avg_max_bio,p+eps_DKW,lw=1,color='lightsalmon',ls='--')
ax_p_bio.step(IL_IC_prev_avg_max_bio,p-eps_DKW,lw=1,color='lightsalmon',ls='--')
#
ax_p_bio.step(PO_IC_prev_avg_max_bio,p,lw=2,color='lightseagreen',label='PO')
ax_p_bio.step(PO_IC_prev_avg_max_bio,p+eps_DKW,lw=1,color='mediumaquamarine',ls='--')
ax_p_bio.step(PO_IC_prev_avg_max_bio,p-eps_DKW,lw=1,color='mediumaquamarine',ls='--')
#
ax_p_bio.axvline(x=IC_capacity,lw=2,linestyle=':',color='black')#,label='IC capacity')
# general settings
ax_p_bio.set_xscale('log')
# ax_p.set_xticks([3e2, 1e3])
ax_p_bio.get_xaxis().get_major_formatter().labelOnlyBase = False
ax_p_bio.get_xaxis().set_minor_formatter(NullFormatter())
# ax_p_bio.set_xlim([1e1, 1e3])
ax_p_bio.set_xticks([1e1, 1e2, 1e3])
ax_p_bio.set_yticks([0, 0.5, 1])
leg = ax_p_bio.legend(loc='upper left')
leg.get_frame().set_linewidth(0.0)
leg.get_frame().set_facecolor('none')
# ax_e_bio.legend(loc='upper center')
# plt.legend(frameon=False)
ax_e_bio = f.add_subplot(222)
# with biology
ax_e_bio.step(FC_IC_ex_max_bio,p,lw=2,color='orchid')
ax_e_bio.step(FC_IC_ex_max_bio,p+eps_DKW,lw=1,color='plum',ls='--')
ax_e_bio.step(FC_IC_ex_max_bio,p-eps_DKW,lw=1,color='plum',ls='--')
#
ax_e_bio.step(CT_IC_ex_max_bio,p,lw=2,color='cornflowerblue')
ax_e_bio.step(CT_IC_ex_max_bio,p+eps_DKW,lw=1,color='lightskyblue',ls='--')
ax_e_bio.step(CT_IC_ex_max_bio,p-eps_DKW,lw=1,color='lightskyblue',ls='--')
#
ax_e_bio.step(IL_IC_ex_max_bio,p,lw=2,color='salmon')
ax_e_bio.step(IL_IC_ex_max_bio,p+eps_DKW,lw=1,color='lightsalmon',ls='--')
ax_e_bio.step(IL_IC_ex_max_bio,p-eps_DKW,lw=1,color='lightsalmon',ls='--')
#
ax_e_bio.step(PO_IC_ex_max_bio,p,lw=2,color='lightseagreen')
ax_e_bio.step(PO_IC_ex_max_bio,p+eps_DKW,lw=1,color='mediumaquamarine',ls='--')
ax_e_bio.step(PO_IC_ex_max_bio,p-eps_DKW,lw=1,color='mediumaquamarine',ls='--')
#
# general settings
ax_e_bio.set_xscale('log')
#ax_e.get_xaxis().set_major_formatter(ScalarFormatter())
ax_e_bio.get_xaxis().get_major_formatter().labelOnlyBase = False
ax_e_bio.get_xaxis().set_minor_formatter(NullFormatter())
ax_e_bio.set_xlim([1e0, 1e5])
ax_e_bio.set_xticks([1e0, 1e1, 1e2, 1e3, 1e4, 1e5])
ax_e_bio.set_yticks([0, 0.5, 1])
ax_p = f.add_subplot(223, xlabel='Maximum of patients in IC \n per million capita', \
ylabel='Only seed and \n policy-related uncertainties \n \n Probability')
# without biology
ax_p.step(FC_IC_prev_avg_max,p,lw=2,color='orchid',label='FC')
ax_p.step(FC_IC_prev_avg_max,p+eps_DKW,lw=1,color='plum',ls='--')
ax_p.step(FC_IC_prev_avg_max,p-eps_DKW,lw=1,color='plum',ls='--')
#
ax_p.step(CT_IC_prev_avg_max,p,lw=2,color='cornflowerblue',label='CT')
ax_p.step(CT_IC_prev_avg_max,p+eps_DKW,lw=1,color='lightskyblue',ls='--')
ax_p.step(CT_IC_prev_avg_max,p-eps_DKW,lw=1,color='lightskyblue',ls='--')
#
ax_p.step(IL_IC_prev_avg_max,p,lw=2,color='salmon',label='SL')
ax_p.step(IL_IC_prev_avg_max,p+eps_DKW,lw=1,color='lightsalmon',ls='--')
ax_p.step(IL_IC_prev_avg_max,p-eps_DKW,lw=1,color='lightsalmon',ls='--')
#
ax_p.step(PO_IC_prev_avg_max,p,lw=2,color='lightseagreen',label='PO')
ax_p.step(PO_IC_prev_avg_max,p+eps_DKW,lw=1,color='mediumaquamarine',ls='--')
ax_p.step(PO_IC_prev_avg_max,p-eps_DKW,lw=1,color='mediumaquamarine',ls='--')
#
ax_p.axvline(x=IC_capacity,lw=2,linestyle=':',color='black')#,label='IC capacity')
# general settings
ax_p.set_xscale('log')
# ax_p.set_xticks([3e2, 1e3])
ax_p.get_xaxis().get_major_formatter().labelOnlyBase = False
ax_p.get_xaxis().set_minor_formatter(NullFormatter())
# ax_p.set_xlim([1e1, 1e3])
ax_p.set_xticks([1e1, 1e2, 1e3])
ax_p.set_yticks([0, 0.5, 1])
ax_e = f.add_subplot(224, xlabel='IC patient-days in excess \n per million capita')
# without biology
ax_e.step(FC_IC_ex_max,p,lw=2,color='orchid')
ax_e.step(FC_IC_ex_max,p+eps_DKW,lw=1,color='plum',ls='--')
ax_e.step(FC_IC_ex_max,p-eps_DKW,lw=1,color='plum',ls='--')
#
ax_e.step(CT_IC_ex_max,p,lw=2,color='cornflowerblue')
ax_e.step(CT_IC_ex_max,p+eps_DKW,lw=1,color='lightskyblue',ls='--')
ax_e.step(CT_IC_ex_max,p-eps_DKW,lw=1,color='lightskyblue',ls='--')
#
ax_e.step(IL_IC_ex_max,p,lw=2,color='salmon')
ax_e.step(IL_IC_ex_max,p+eps_DKW,lw=1,color='lightsalmon',ls='--')
ax_e.step(IL_IC_ex_max,p-eps_DKW,lw=1,color='lightsalmon',ls='--')
#
ax_e.step(PO_IC_ex_max,p,lw=2,color='lightseagreen')
ax_e.step(PO_IC_ex_max,p+eps_DKW,lw=1,color='mediumaquamarine',ls='--')
ax_e.step(PO_IC_ex_max,p-eps_DKW,lw=1,color='mediumaquamarine',ls='--')
#
# general settings
ax_e.set_xscale('log')
# ax_e.set_xticks([1e4, 6e4])
#ax_e.get_xaxis().set_major_formatter(ScalarFormatter())
ax_e.get_xaxis().get_major_formatter().labelOnlyBase = False
ax_e.get_xaxis().set_minor_formatter(NullFormatter())
ax_e.set_xlim([1e0, 1e5])
ax_e.set_xticks([1e0, 1e1, 1e2, 1e3, 1e4, 1e5])
ax_e.set_yticks([0, 0.5, 1])
# ax_p.legend(loc='upper center')
plt.tight_layout()
f.savefig('figures/Fig2_cdfs.eps')
plt.show()
### END OF CODE ###
| [
"easyvvuq.Campaign",
"easyvvuq.analysis.QMCAnalysis",
"matplotlib.pyplot.show",
"numpy.log",
"os.path.dirname",
"numpy.sort",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcParams.update",
"numpy.arange",
"matplotlib.ticker.NullFormatter",
"matplotlib.pyplot.tight_layout"
] | [((210, 271), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18, 'legend.fontsize': 15}"], {}), "({'font.size': 18, 'legend.fontsize': 15})\n", (229, 271), True, 'import matplotlib.pyplot as plt\n'), ((563, 634), 'easyvvuq.Campaign', 'uq.Campaign', ([], {'state_file': '"""campaign_state_FC_MC2k.json"""', 'work_dir': 'workdir'}), "(state_file='campaign_state_FC_MC2k.json', work_dir=workdir)\n", (574, 634), True, 'import easyvvuq as uq\n'), ((966, 1037), 'easyvvuq.analysis.QMCAnalysis', 'uq.analysis.QMCAnalysis', ([], {'sampler': 'FC_sampler', 'qoi_cols': 'FC_output_columns'}), '(sampler=FC_sampler, qoi_cols=FC_output_columns)\n', (989, 1037), True, 'import easyvvuq as uq\n'), ((1230, 1315), 'easyvvuq.Campaign', 'uq.Campaign', ([], {'state_file': '"""campaign_state_CT_MC2k_newdistr.json"""', 'work_dir': 'workdir'}), "(state_file='campaign_state_CT_MC2k_newdistr.json', work_dir=workdir\n )\n", (1241, 1315), True, 'import easyvvuq as uq\n'), ((1642, 1713), 'easyvvuq.analysis.QMCAnalysis', 'uq.analysis.QMCAnalysis', ([], {'sampler': 'CT_sampler', 'qoi_cols': 'CT_output_columns'}), '(sampler=CT_sampler, qoi_cols=CT_output_columns)\n', (1665, 1713), True, 'import easyvvuq as uq\n'), ((1906, 1983), 'easyvvuq.Campaign', 'uq.Campaign', ([], {'state_file': '"""campaign_state_IL_nobio_MC2k.json"""', 'work_dir': 'workdir'}), "(state_file='campaign_state_IL_nobio_MC2k.json', work_dir=workdir)\n", (1917, 1983), True, 'import easyvvuq as uq\n'), ((2315, 2386), 'easyvvuq.analysis.QMCAnalysis', 'uq.analysis.QMCAnalysis', ([], {'sampler': 'IL_sampler', 'qoi_cols': 'IL_output_columns'}), '(sampler=IL_sampler, qoi_cols=IL_output_columns)\n', (2338, 2386), True, 'import easyvvuq as uq\n'), ((2579, 2650), 'easyvvuq.Campaign', 'uq.Campaign', ([], {'state_file': '"""campaign_state_PO_MC2k.json"""', 'work_dir': 'workdir'}), "(state_file='campaign_state_PO_MC2k.json', work_dir=workdir)\n", (2590, 2650), True, 'import easyvvuq as uq\n'), ((2982, 3053), 
'easyvvuq.analysis.QMCAnalysis', 'uq.analysis.QMCAnalysis', ([], {'sampler': 'PO_sampler', 'qoi_cols': 'PO_output_columns'}), '(sampler=PO_sampler, qoi_cols=PO_output_columns)\n', (3005, 3053), True, 'import easyvvuq as uq\n'), ((3344, 3421), 'easyvvuq.Campaign', 'uq.Campaign', ([], {'state_file': '"""campaign_state_FC_bio_cdf_2k.json"""', 'work_dir': 'workdir'}), "(state_file='campaign_state_FC_bio_cdf_2k.json', work_dir=workdir)\n", (3355, 3421), True, 'import easyvvuq as uq\n'), ((3839, 3916), 'easyvvuq.Campaign', 'uq.Campaign', ([], {'state_file': '"""campaign_state_CT_bio_cdf_2k.json"""', 'work_dir': 'workdir'}), "(state_file='campaign_state_CT_bio_cdf_2k.json', work_dir=workdir)\n", (3850, 3916), True, 'import easyvvuq as uq\n'), ((4334, 4411), 'easyvvuq.Campaign', 'uq.Campaign', ([], {'state_file': '"""campaign_state_IL_bio_cdf_2k.json"""', 'work_dir': 'workdir'}), "(state_file='campaign_state_IL_bio_cdf_2k.json', work_dir=workdir)\n", (4345, 4411), True, 'import easyvvuq as uq\n'), ((5001, 5078), 'easyvvuq.Campaign', 'uq.Campaign', ([], {'state_file': '"""campaign_state_PO_bio_cdf_2k.json"""', 'work_dir': 'workdir'}), "(state_file='campaign_state_PO_bio_cdf_2k.json', work_dir=workdir)\n", (5012, 5078), True, 'import easyvvuq as uq\n'), ((8815, 8853), 'numpy.sort', 'np.sort', (['FC_IC_prev_avg_max'], {'axis': 'None'}), '(FC_IC_prev_avg_max, axis=None)\n', (8822, 8853), True, 'import numpy as np\n'), ((8868, 8900), 'numpy.sort', 'np.sort', (['FC_IC_ex_max'], {'axis': 'None'}), '(FC_IC_ex_max, axis=None)\n', (8875, 8900), True, 'import numpy as np\n'), ((8923, 8961), 'numpy.sort', 'np.sort', (['CT_IC_prev_avg_max'], {'axis': 'None'}), '(CT_IC_prev_avg_max, axis=None)\n', (8930, 8961), True, 'import numpy as np\n'), ((8977, 9009), 'numpy.sort', 'np.sort', (['CT_IC_ex_max'], {'axis': 'None'}), '(CT_IC_ex_max, axis=None)\n', (8984, 9009), True, 'import numpy as np\n'), ((9032, 9070), 'numpy.sort', 'np.sort', (['IL_IC_prev_avg_max'], {'axis': 'None'}), 
'(IL_IC_prev_avg_max, axis=None)\n', (9039, 9070), True, 'import numpy as np\n'), ((9086, 9118), 'numpy.sort', 'np.sort', (['IL_IC_ex_max'], {'axis': 'None'}), '(IL_IC_ex_max, axis=None)\n', (9093, 9118), True, 'import numpy as np\n'), ((9141, 9179), 'numpy.sort', 'np.sort', (['PO_IC_prev_avg_max'], {'axis': 'None'}), '(PO_IC_prev_avg_max, axis=None)\n', (9148, 9179), True, 'import numpy as np\n'), ((9195, 9227), 'numpy.sort', 'np.sort', (['PO_IC_ex_max'], {'axis': 'None'}), '(PO_IC_ex_max, axis=None)\n', (9202, 9227), True, 'import numpy as np\n'), ((9254, 9296), 'numpy.sort', 'np.sort', (['FC_IC_prev_avg_max_bio'], {'axis': 'None'}), '(FC_IC_prev_avg_max_bio, axis=None)\n', (9261, 9296), True, 'import numpy as np\n'), ((9316, 9352), 'numpy.sort', 'np.sort', (['FC_IC_ex_max_bio'], {'axis': 'None'}), '(FC_IC_ex_max_bio, axis=None)\n', (9323, 9352), True, 'import numpy as np\n'), ((9379, 9421), 'numpy.sort', 'np.sort', (['CT_IC_prev_avg_max_bio'], {'axis': 'None'}), '(CT_IC_prev_avg_max_bio, axis=None)\n', (9386, 9421), True, 'import numpy as np\n'), ((9441, 9477), 'numpy.sort', 'np.sort', (['CT_IC_ex_max_bio'], {'axis': 'None'}), '(CT_IC_ex_max_bio, axis=None)\n', (9448, 9477), True, 'import numpy as np\n'), ((9504, 9546), 'numpy.sort', 'np.sort', (['IL_IC_prev_avg_max_bio'], {'axis': 'None'}), '(IL_IC_prev_avg_max_bio, axis=None)\n', (9511, 9546), True, 'import numpy as np\n'), ((9566, 9602), 'numpy.sort', 'np.sort', (['IL_IC_ex_max_bio'], {'axis': 'None'}), '(IL_IC_ex_max_bio, axis=None)\n', (9573, 9602), True, 'import numpy as np\n'), ((9629, 9671), 'numpy.sort', 'np.sort', (['PO_IC_prev_avg_max_bio'], {'axis': 'None'}), '(PO_IC_prev_avg_max_bio, axis=None)\n', (9636, 9671), True, 'import numpy as np\n'), ((9691, 9727), 'numpy.sort', 'np.sort', (['PO_IC_ex_max_bio'], {'axis': 'None'}), '(PO_IC_ex_max_bio, axis=None)\n', (9698, 9727), True, 'import numpy as np\n'), ((9821, 9856), 'matplotlib.pyplot.figure', 'plt.figure', (['"""cdfs"""'], {'figsize': '[12, 7]'}), 
"('cdfs', figsize=[12, 7])\n", (9831, 9856), True, 'import matplotlib.pyplot as plt\n'), ((15317, 15335), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15333, 15335), True, 'import matplotlib.pyplot as plt\n'), ((15373, 15383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15381, 15383), True, 'import matplotlib.pyplot as plt\n'), ((480, 505), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (495, 505), False, 'import os\n'), ((9733, 9776), 'numpy.arange', 'np.arange', ([], {'start': '(1)', 'stop': '(n_runs + 1)', 'step': '(1)'}), '(start=1, stop=n_runs + 1, step=1)\n', (9742, 9776), True, 'import numpy as np\n'), ((11168, 11183), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (11181, 11183), False, 'from matplotlib.ticker import ScalarFormatter, NullFormatter\n'), ((12559, 12574), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (12572, 12574), False, 'from matplotlib.ticker import ScalarFormatter, NullFormatter\n'), ((13984, 13999), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (13997, 13999), False, 'from matplotlib.ticker import ScalarFormatter, NullFormatter\n'), ((15162, 15177), 'matplotlib.ticker.NullFormatter', 'NullFormatter', ([], {}), '()\n', (15175, 15177), False, 'from matplotlib.ticker import ScalarFormatter, NullFormatter\n'), ((5880, 5901), 'numpy.log', 'np.log', (['(2 / alpha_DKW)'], {}), '(2 / alpha_DKW)\n', (5886, 5901), True, 'import numpy as np\n')] |
# Copyright 2020 The MuLT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from skopt import gp_minimize
from skopt.space import Real, Integer, Categorical
from skopt.utils import use_named_args
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import log_loss
from sklearn.svm import SVC
import numpy as np
import warnings
warnings.filterwarnings("ignore")
def sigmoid(x, alpha=1.):
return 1. / (1. + np.exp(-alpha * x))
class SVMOptimizer(object):
def __init__(self,
n_folds=3, n_calls=50, shuffle=True, early_stopping_rounds=None,
fixed_parameters={}, random_state=None, verbose=-1, n_jobs=-1):
self.n_calls = n_calls
self.n_folds = n_folds
self.random_state = random_state
self.shuffle = shuffle
self.verbose = verbose
self.n_jobs = n_jobs
self.optimization_details = {}
self.early_stopping_rounds = early_stopping_rounds
self.fixed_parameters = fixed_parameters
def execute_optimization(self, objective, space):
params = gp_minimize(objective, space, n_calls=self.n_calls, random_state=self.random_state,
verbose=(self.verbose >= 0), n_jobs=self.n_jobs).x
return {space[i].name: params[i] for i in range(len(space))}
def optimize(self, x, y):
space = [
Real(1e-6, 1e+6, prior='log-uniform', name='C'),
Real(1e-6, 1e+1, prior='log-uniform', name='gamma'),
Integer(1, 8, name='degree'),
Categorical(['linear', 'poly', 'rbf'], name='kernel')]
@use_named_args(space)
def objective(C, gamma, degree, kernel):
try:
scores = []
params = {
'C': C,
'gamma': gamma,
'degree': degree,
'kernel': kernel,
'random_state': self.random_state}
if isinstance(self.fixed_parameters, dict):
params.update(self.fixed_parameters)
skf = StratifiedKFold(
self.n_folds, shuffle=self.shuffle, random_state=self.random_state)
for train_index, valid_index in skf.split(x, y):
try:
x_train, y_train = x[train_index, :], y[train_index, 0]
except IndexError:
x_train, y_train = x[train_index, :], y[train_index].reshape(-1,)
try:
x_valid, y_valid = x[valid_index, :], y[valid_index, 0]
except IndexError:
x_valid, y_valid = x[valid_index, :], y[valid_index].reshape(-1,)
svm = SVC(**params)
svm.fit(x_train, y_train)
y_hat = sigmoid(svm.predict(x_valid))
scores.append(log_loss(y_valid, y_hat))
return np.mean(scores)
except ValueError:
return np.inf
return self.execute_optimization(objective, space)
| [
"skopt.gp_minimize",
"skopt.space.Categorical",
"warnings.filterwarnings",
"skopt.utils.use_named_args",
"skopt.space.Integer",
"sklearn.metrics.log_loss",
"skopt.space.Real",
"numpy.mean",
"sklearn.model_selection.StratifiedKFold",
"numpy.exp",
"sklearn.svm.SVC"
] | [((958, 991), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (981, 991), False, 'import warnings\n'), ((2221, 2242), 'skopt.utils.use_named_args', 'use_named_args', (['space'], {}), '(space)\n', (2235, 2242), False, 'from skopt.utils import use_named_args\n'), ((1042, 1060), 'numpy.exp', 'np.exp', (['(-alpha * x)'], {}), '(-alpha * x)\n', (1048, 1060), True, 'import numpy as np\n'), ((1692, 1827), 'skopt.gp_minimize', 'gp_minimize', (['objective', 'space'], {'n_calls': 'self.n_calls', 'random_state': 'self.random_state', 'verbose': '(self.verbose >= 0)', 'n_jobs': 'self.n_jobs'}), '(objective, space, n_calls=self.n_calls, random_state=self.\n random_state, verbose=self.verbose >= 0, n_jobs=self.n_jobs)\n', (1703, 1827), False, 'from skopt import gp_minimize\n'), ((1988, 2041), 'skopt.space.Real', 'Real', (['(1e-06)', '(1000000.0)'], {'prior': '"""log-uniform"""', 'name': '"""C"""'}), "(1e-06, 1000000.0, prior='log-uniform', name='C')\n", (1992, 2041), False, 'from skopt.space import Real, Integer, Categorical\n'), ((2049, 2101), 'skopt.space.Real', 'Real', (['(1e-06)', '(10.0)'], {'prior': '"""log-uniform"""', 'name': '"""gamma"""'}), "(1e-06, 10.0, prior='log-uniform', name='gamma')\n", (2053, 2101), False, 'from skopt.space import Real, Integer, Categorical\n'), ((2114, 2142), 'skopt.space.Integer', 'Integer', (['(1)', '(8)'], {'name': '"""degree"""'}), "(1, 8, name='degree')\n", (2121, 2142), False, 'from skopt.space import Real, Integer, Categorical\n'), ((2156, 2209), 'skopt.space.Categorical', 'Categorical', (["['linear', 'poly', 'rbf']"], {'name': '"""kernel"""'}), "(['linear', 'poly', 'rbf'], name='kernel')\n", (2167, 2209), False, 'from skopt.space import Real, Integer, Categorical\n'), ((2701, 2789), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['self.n_folds'], {'shuffle': 'self.shuffle', 'random_state': 'self.random_state'}), '(self.n_folds, shuffle=self.shuffle, random_state=self.\n 
random_state)\n', (2716, 2789), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((3576, 3591), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (3583, 3591), True, 'import numpy as np\n'), ((3371, 3384), 'sklearn.svm.SVC', 'SVC', ([], {}), '(**params)\n', (3374, 3384), False, 'from sklearn.svm import SVC\n'), ((3526, 3550), 'sklearn.metrics.log_loss', 'log_loss', (['y_valid', 'y_hat'], {}), '(y_valid, y_hat)\n', (3534, 3550), False, 'from sklearn.metrics import log_loss\n')] |
import argparse
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
def main():
parser = argparse.ArgumentParser(description='Visualize two random malware bitstring.')
parser.add_argument('dataset', type=str, help='Dataset (csv.xz) file location.')
args = parser.parse_args()
df = pd.read_csv(args.dataset)
sample1 = df.sample(1).values.tolist()[0]
sample2 = df.sample(1).values.tolist()[0]
d1 = np.fromiter(sample1[2:], dtype=np.uint8) + 255
d2 = np.fromiter(sample2[2:], dtype=np.uint8) + 255
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
ax = axs[0]
ax.imshow(np.reshape(d1, (100, 100)), cmap='gray_r')
ax.set_title(sample1[0], fontweight="bold", size=20)
ax = axs[1]
ax.imshow(np.reshape(d2, (100, 100)), cmap='gray_r')
ax.set_title(sample2[0], fontweight="bold", size=20)
plt.xticks([])
plt.yticks([])
plt.show()
if __name__ == '__main__':
main() | [
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"pandas.read_csv",
"matplotlib.pyplot.yticks",
"numpy.reshape",
"numpy.fromiter",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots"
] | [((117, 195), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Visualize two random malware bitstring."""'}), "(description='Visualize two random malware bitstring.')\n", (140, 195), False, 'import argparse\n'), ((322, 347), 'pandas.read_csv', 'pd.read_csv', (['args.dataset'], {}), '(args.dataset)\n', (333, 347), True, 'import pandas as pd\n'), ((570, 614), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(1, 2, sharex=True, sharey=True)\n', (582, 614), True, 'import matplotlib.pyplot as plt\n'), ((886, 900), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (896, 900), True, 'import matplotlib.pyplot as plt\n'), ((905, 919), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (915, 919), True, 'import matplotlib.pyplot as plt\n'), ((924, 934), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (932, 934), True, 'import matplotlib.pyplot as plt\n'), ((451, 491), 'numpy.fromiter', 'np.fromiter', (['sample1[2:]'], {'dtype': 'np.uint8'}), '(sample1[2:], dtype=np.uint8)\n', (462, 491), True, 'import numpy as np\n'), ((507, 547), 'numpy.fromiter', 'np.fromiter', (['sample2[2:]'], {'dtype': 'np.uint8'}), '(sample2[2:], dtype=np.uint8)\n', (518, 547), True, 'import numpy as np\n'), ((646, 672), 'numpy.reshape', 'np.reshape', (['d1', '(100, 100)'], {}), '(d1, (100, 100))\n', (656, 672), True, 'import numpy as np\n'), ((781, 807), 'numpy.reshape', 'np.reshape', (['d2', '(100, 100)'], {}), '(d2, (100, 100))\n', (791, 807), True, 'import numpy as np\n')] |
import numpy as np
from scipy import sparse
def spectrum(L, k=6):
"""
Compute the smallest k eigenvalues and corresponding eigenvectors
of the graph laplacian.
Parameters:
- - - - -
L: float, array
sparse laplacian matrix
k: int
number of eigenvectors / values to compute
Returns:
- - - -
E: float, array
eigenvectors
Lambda: float, array
eigenvalues
"""
[Lambda, E] = sparse.linalg.eigs(L, k=k, which='SM')
Lambda = np.real(Lambda)
E = np.real(E)
# ensure that eigenvalues and vectors are sorted in ascending order
idx = np.argsort(Lambda)
Lambda = Lambda[idx]
E = E[:, idx]
# scale eigenvectors by inverse sqare root of eigenvales
E[:,1:] = np.dot(E[:, 1:], np.diag(Lambda[1:]**(-0.5)))
signf = 1-2*(E[0,:]<0)
E = E*signf[None, :]
return [E, Lambda] | [
"numpy.argsort",
"numpy.diag",
"scipy.sparse.linalg.eigs",
"numpy.real"
] | [((459, 497), 'scipy.sparse.linalg.eigs', 'sparse.linalg.eigs', (['L'], {'k': 'k', 'which': '"""SM"""'}), "(L, k=k, which='SM')\n", (477, 497), False, 'from scipy import sparse\n'), ((511, 526), 'numpy.real', 'np.real', (['Lambda'], {}), '(Lambda)\n', (518, 526), True, 'import numpy as np\n'), ((535, 545), 'numpy.real', 'np.real', (['E'], {}), '(E)\n', (542, 545), True, 'import numpy as np\n'), ((629, 647), 'numpy.argsort', 'np.argsort', (['Lambda'], {}), '(Lambda)\n', (639, 647), True, 'import numpy as np\n'), ((784, 811), 'numpy.diag', 'np.diag', (['(Lambda[1:] ** -0.5)'], {}), '(Lambda[1:] ** -0.5)\n', (791, 811), True, 'import numpy as np\n')] |
"""MatterSim API.
Authors:
- <NAME> (<EMAIL>)
- <NAME> (<EMAIL>)
"""
from __future__ import division, print_function, unicode_literals
import os
import MatterSim
import math
import numpy as np
import modules.vis_utils
# https://github.com/peteanderson80/Matterport3DSimulator/blob/master/src/driver/driver.py
class MatterSimAPI(object):
"""MatterSim API."""
def __init__(self, params):
self.params = self._setup_params(params)
def _setup_params(self, params):
# Image params
params['width'] = int(params['width'])
params['height'] = int(params['height'])
params['depth'] = bool(params['depth'])
params['annotate_image'] = bool(params['annotate_image'])
# FOV Params
params['hfov_RAD'] = math.radians(params['hfov_DEG'])
params['vfov_RAD'] = params['hfov_RAD'] / params['width'] * params['height']
params['vfov_DEG'] = np.rad2deg(params['vfov_RAD'])
# STDOUT
print("Parameters")
print("----------")
for param_key, param_val in params.items():
print(f'{param_key:15s} {param_val}')
return params
def add_to_params(self, new_params):
for key, val in new_params.items():
if key in self.params:
print(f"{key} already exists in self.params!")
else:
self.params[key] = val
def _reset_history(self):
self.history = {
"movement" : [],
"location" : []
}
def update_history(self, key, vals):
self.history[key].append(vals)
def find_intersection_image_ids(self, scene_id, connectivity):
self._init_sim_enviornment()
image_ids = [ele['image_id'] for ele in connectivity]
intersection_set = []
for image_id in image_ids:
try:
self.sim.newEpisode([scene_id], [image_id], [0], [0])
intersection_set.append(image_id)
except Exception as e:
pass
self.close()
return intersection_set
def _init_sim_enviornment(self):
self.sim = MatterSim.Simulator()
self.sim.setCameraResolution(self.params['width'], self.params['height'])
self.sim.setCameraVFOV(self.params['vfov_RAD'])
self.sim.setDepthEnabled(self.params['depth'])
self.sim.initialize()
def initialize(self, scene_id, image_id, heading=0, elevation=0, reset_history=True):
self.scene_id = scene_id
self._init_sim_enviornment()
successful_init = True
try:
self.sim.newEpisode([scene_id], [image_id], [heading], [elevation])
except:
successful_init = True
if reset_history:
self._reset_history()
return successful_init
def _read_sensor_data(self, state):
sensor_data = {}
# Get RGB image
RGB_img = np.array(state.rgb, copy=False)[:,:, [2,1,0]]
if self.params['annotate_image']:
RGB_img = modules.vis_utils.annotate_locations_on_img(state.navigableLocations, RGB_img, params=self.params)
sensor_data['RGB'] = RGB_img.copy()
# Check for depth image
if self.params['depth']:
sensor_data['depth'] = np.array(state.depth, copy=False)
return sensor_data
def run(self, sim_step_i, location, image_id, heading_delta, elevation_delta, verbose=False, set_by='image_id'):
if set_by == 'location':
self.sim.makeAction([location], [heading_delta], [elevation_delta])
elif set_by == 'image_id':
if sim_step_i == 0:
self.heading = heading_delta
self.elevation = elevation_delta
else:
self.heading += heading_delta
self.heading %= 2*np.pi
self.elevation += elevation_delta
_ = self.initialize(self.scene_id, image_id, self.heading, self.elevation, reset_history=False)
else:
assert False, "Invalid set_by argument"
# Get state
assert len(self.sim.getState()) == 1, "More than 1 state!"
state = self.sim.getState()[0]
# Update state history
state_location = np.array([state.location.x, state.location.y, state.location.z])
self.history['location'].append(state_location)
# Process image
sensor_data = self._read_sensor_data(state)
# Stdout
if verbose:
stdout_str = f"\n\t[MatterSim] Location (x,y,z): "
stdout_str += f"({state.location.x:0.6f}, "
stdout_str += f"{state.location.y:0.6f}, "
stdout_str += f"{state.location.z:0.6f})"
print(stdout_str)
return state, sensor_data
def close(self, verbose=True):
if verbose:
print("\nClosing MatterportSim.")
try:
self.sim.close()
except Exception as e:
print(f"Error closing sim. \n{e}")
| [
"math.radians",
"MatterSim.Simulator",
"numpy.rad2deg",
"numpy.array"
] | [((784, 816), 'math.radians', 'math.radians', (["params['hfov_DEG']"], {}), "(params['hfov_DEG'])\n", (796, 816), False, 'import math\n'), ((931, 961), 'numpy.rad2deg', 'np.rad2deg', (["params['vfov_RAD']"], {}), "(params['vfov_RAD'])\n", (941, 961), True, 'import numpy as np\n'), ((2223, 2244), 'MatterSim.Simulator', 'MatterSim.Simulator', ([], {}), '()\n', (2242, 2244), False, 'import MatterSim\n'), ((4405, 4469), 'numpy.array', 'np.array', (['[state.location.x, state.location.y, state.location.z]'], {}), '([state.location.x, state.location.y, state.location.z])\n', (4413, 4469), True, 'import numpy as np\n'), ((3032, 3063), 'numpy.array', 'np.array', (['state.rgb'], {'copy': '(False)'}), '(state.rgb, copy=False)\n', (3040, 3063), True, 'import numpy as np\n'), ((3406, 3439), 'numpy.array', 'np.array', (['state.depth'], {'copy': '(False)'}), '(state.depth, copy=False)\n', (3414, 3439), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import torch
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.customBM_models import ModelBuilder
from models.modelRatioMCOrigin2in_audioVisual import AudioVisualModel
from torch.autograd import Variable
from tensorboardX import SummaryWriter
import numpy as np
#used to display validation loss
def display_val(model, loss_criterion, writer, dataset_val, opt):
batch_loss = []
batch_mse_loss = []
batch_bm_loss = []
batch_phase_loss = []
with torch.no_grad():
for i, val_data in enumerate(dataset_val):
if i < opt.validation_batches:
output = model.forward(val_data)
mse_loss = loss_criterion(output['predicted_spectrogram'], output['audio_gt'])
bm_loss = loss_criterion(output['BM_pred'], output['BM_gt'])
loss=bm_loss+mse_loss # mse_loss+
batch_loss.append(loss.item())
batch_mse_loss.append(mse_loss.item())
batch_bm_loss.append(bm_loss.item())
else:
break
avg_loss = sum(batch_loss)/len(batch_loss)
avg_mse_loss = sum(batch_mse_loss)/len(batch_mse_loss)
avg_bm_loss = sum(batch_bm_loss)/len(batch_bm_loss)
if opt.tensorboard:
writer.add_scalar('data/val_loss', avg_loss)
writer.add_scalar('data/mse_loss', avg_mse_loss)
writer.add_scalar('data/bm__loss', avg_bm_loss)
print('val loss: %.3f' % avg_loss)
print('val mse loss: %.3f' % avg_mse_loss)
print('val bm loss: %.3f' % avg_bm_loss)
return avg_loss
def display_otherMetric(model, writer, dataset_val, opt):
eps=1e-8
eps2=1e-4
magBiases = []
logMagBiases = []
magBiases2 = []
logMagBiases2 = []
magPhaseErrors=[]
logMagPhaseErrors=[]
with torch.no_grad():
for i, val_data in enumerate(dataset_val):
if i < opt.validation_batches:
output = model.forward(val_data)
# B C F T
#print("shape of output:",output['BM_pred'].shape,output['BM_gt'].shape)
one=torch.ones(1).to(opt.device)
magM=((val_data['audio_input_spec'][:,0:1]**2+val_data['audio_input_spec'][:,1:]**2)[:,:,:-1]**0.5).to(opt.device)# input 0:1 1: prevent B C F t to be BFT
#print("max min magM",magM.max(),magM.min())
logMagM=torch.log1p(magM)
magHatD=(output['BM_pred'])
magGtD=(output['BM_gt'])
#print("shape of mag:",magM.shape,magHatD.shape,magGtD.shape)
#print("device:",one.device,magM.device,magHatD.device,magGtD.device)
FtBias=torch.zeros_like(magHatD)
FtBias[magHatD<=magGtD]=(one-magHatD/magGtD)[magHatD<=magGtD]
FtBias[magHatD>magGtD]=(magGtD/magHatD-one)[magHatD>magGtD]
logMagBias=torch.sum(logMagM*(FtBias),dim=[2,3])/torch.sum(logMagM,dim=[2,3])
#print("shape of weighted average bias",logMagBias.shape)
logMagBias=torch.mean(logMagBias)
#print("shape of mean of weighted average bias",logMagBias.shape)
magBias=torch.sum(magM*(FtBias),dim=[2,3])/torch.sum(magM,dim=[2,3])
magBias=torch.mean(magBias)
magBiases.append(magBias.item())
logMagBiases.append(logMagBias.item())
# for the phase error
logMagM=logMagM.cpu().numpy()
magM=magM.cpu().numpy()
# new version: ipd first then ipd error and error weight on |M|
LpR=val_data['audio_input_spec'][:,:,:-1].to(opt.device)
tL=LpR+output['predicted_spectrogram'] # M + MB
tR=LpR-output['predicted_spectrogram']
L=LpR+output['audio_gt']
R=LpR-output['audio_gt']
tL=tL.cpu().numpy()
tR=tR.cpu().numpy()
L=L.cpu().numpy()
R=R.cpu().numpy()
#print(LpR.shape,output['binaural_spectrogram'].shape)
tL_angle=np.angle(tL[:,0:1]+tL[:,1:]*1j)
L_angle=np.angle(L[:,0:1]+L[:,1:]*1j)
tR_angle=np.angle(tR[:,0:1]+tR[:,1:]*1j)
R_angle=np.angle(R[:,0:1]+R[:,1:]*1j)
#print(tL_angle.shape,output['binaural_spectrogram'].shape)
# ((32, 1, 256, 64), (32, 2, 256, 64))
# IPD has positive or negative 0.75pi - (0.75pi) = 1.5 pi but it is -0.5pi; pi - - pi= 2pi but should 0
# -0.95 - 0.95= -1.9 --> 0.1 0.8 - -0.8=1.6
tIPD=tL_angle-tR_angle
tIPD[tIPD<-np.pi]=2*np.pi+tIPD[tIPD<-np.pi]
tIPD[tIPD>np.pi]=tIPD[tIPD>np.pi]-2*np.pi
IPD=L_angle-R_angle
IPD[IPD<-np.pi]=2*np.pi+IPD[IPD<-np.pi]
IPD[IPD>np.pi]=IPD[IPD>np.pi]-2*np.pi
phase_error=np.abs(IPD-tIPD)
phase_error[phase_error>np.pi]=2*np.pi-phase_error[phase_error>np.pi]
#print(tL.shape,tL_angle.shape,tIPD.shape,phase_error.shape)
#((32, 2, 256, 64), (32, 1, 256, 64), (32, 1, 256, 64), (32, 1, 256, 64))
logMagPhaseError=np.sum(logMagM*(phase_error),axis=(2,3))/np.sum(logMagM,axis=(2,3))
#print("shape of weighted average bias",logMagBias.shape)
logMagPhaseError=np.mean(logMagPhaseError)
#print("shape of mean of weighted average bias",logMagBias.shape)
magPhaseError=np.sum(magM*(phase_error),axis=(2,3))/np.sum(magM,axis=(2,3))
magPhaseError=np.mean(magPhaseError)
magPhaseErrors.append(magPhaseError)
logMagPhaseErrors.append(logMagPhaseError)
else:
break
avg_loss = sum(magBiases)/len(magBiases)
avg_logloss = sum(logMagBiases)/len(logMagBiases)
avg_phase_error = sum(magPhaseErrors)/len(magPhaseErrors)
avg_logphase_error = sum(logMagPhaseErrors)/len(logMagPhaseErrors)
if opt.tensorboard:
writer.add_scalar('data/val_loss', avg_loss)
print('bias log: %.3f' % avg_logloss)
print('bias: %.3f' % avg_loss)
print('phase log: %.3f' % avg_logphase_error)
print('phase: %.3f' % avg_phase_error)
return avg_loss
#parse arguments
opt = TrainOptions().parse()
opt.device = torch.device("cuda")
#create validation set data loader if validation_on option is set
if opt.validation_on:
#temperally set to val to load val data
opt.mode = 'val'
data_loader_val = CreateDataLoader(opt)
dataset_val = data_loader_val.load_data()
dataset_size_val = len(data_loader_val)
print('#validation clips = %d' % dataset_size_val)
opt.mode = 'train' #set it back
if opt.tensorboard:
from tensorboardX import SummaryWriter
writer = SummaryWriter(comment=opt.name)
else:
writer = None
# network builders
builder = ModelBuilder()
net_visual = builder.build_visual(weights=opt.weights_visual)
net_audio = builder.build_audio(
ngf=opt.unet_ngf,
input_nc=opt.unet_input_nc,
output_nc=opt.unet_output_nc,
weights=opt.weights_audio,relu=True,sigmoid=False,batch_norm=True)
nets = (net_visual, net_audio)
# construct our audio-visual model
model = AudioVisualModel(nets, opt)
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model.to(opt.device)
# set up loss function
loss_criterion = torch.nn.MSELoss()
if(len(opt.gpu_ids) > 0):
loss_criterion.cuda(opt.gpu_ids[0])
print("begin evaluation")
model.eval()
opt.mode = 'val'
display_otherMetric(model, writer, dataset_val, opt)
val_err = display_val(model, loss_criterion, writer, dataset_val, opt)
| [
"torch.mean",
"torch.ones",
"torch.nn.MSELoss",
"tensorboardX.SummaryWriter",
"numpy.abs",
"numpy.sum",
"torch.zeros_like",
"torch.sum",
"numpy.angle",
"data.data_loader.CreateDataLoader",
"torch.log1p",
"numpy.mean",
"torch.device",
"options.train_options.TrainOptions",
"torch.nn.DataPa... | [((6703, 6723), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6715, 6723), False, 'import torch\n'), ((7267, 7281), 'models.customBM_models.ModelBuilder', 'ModelBuilder', ([], {}), '()\n', (7279, 7281), False, 'from models.customBM_models import ModelBuilder\n'), ((7627, 7654), 'models.modelRatioMCOrigin2in_audioVisual.AudioVisualModel', 'AudioVisualModel', (['nets', 'opt'], {}), '(nets, opt)\n', (7643, 7654), False, 'from models.modelRatioMCOrigin2in_audioVisual import AudioVisualModel\n'), ((7663, 7715), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'opt.gpu_ids'}), '(model, device_ids=opt.gpu_ids)\n', (7684, 7715), False, 'import torch\n'), ((7780, 7798), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (7796, 7798), False, 'import torch\n'), ((6901, 6922), 'data.data_loader.CreateDataLoader', 'CreateDataLoader', (['opt'], {}), '(opt)\n', (6917, 6922), False, 'from data.data_loader import CreateDataLoader\n'), ((7181, 7212), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'comment': 'opt.name'}), '(comment=opt.name)\n', (7194, 7212), False, 'from tensorboardX import SummaryWriter\n'), ((764, 779), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (777, 779), False, 'import torch\n'), ((2072, 2087), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2085, 2087), False, 'import torch\n'), ((6667, 6681), 'options.train_options.TrainOptions', 'TrainOptions', ([], {}), '()\n', (6679, 6681), False, 'from options.train_options import TrainOptions\n'), ((2653, 2670), 'torch.log1p', 'torch.log1p', (['magM'], {}), '(magM)\n', (2664, 2670), False, 'import torch\n'), ((2943, 2968), 'torch.zeros_like', 'torch.zeros_like', (['magHatD'], {}), '(magHatD)\n', (2959, 2968), False, 'import torch\n'), ((3319, 3341), 'torch.mean', 'torch.mean', (['logMagBias'], {}), '(logMagBias)\n', (3329, 3341), False, 'import torch\n'), ((3533, 3552), 'torch.mean', 'torch.mean', (['magBias'], {}), 
'(magBias)\n', (3543, 3552), False, 'import torch\n'), ((4380, 4419), 'numpy.angle', 'np.angle', (['(tL[:, 0:1] + tL[:, 1:] * 1.0j)'], {}), '(tL[:, 0:1] + tL[:, 1:] * 1.0j)\n', (4388, 4419), True, 'import numpy as np\n'), ((4436, 4473), 'numpy.angle', 'np.angle', (['(L[:, 0:1] + L[:, 1:] * 1.0j)'], {}), '(L[:, 0:1] + L[:, 1:] * 1.0j)\n', (4444, 4473), True, 'import numpy as np\n'), ((4491, 4530), 'numpy.angle', 'np.angle', (['(tR[:, 0:1] + tR[:, 1:] * 1.0j)'], {}), '(tR[:, 0:1] + tR[:, 1:] * 1.0j)\n', (4499, 4530), True, 'import numpy as np\n'), ((4547, 4584), 'numpy.angle', 'np.angle', (['(R[:, 0:1] + R[:, 1:] * 1.0j)'], {}), '(R[:, 0:1] + R[:, 1:] * 1.0j)\n', (4555, 4584), True, 'import numpy as np\n'), ((5233, 5251), 'numpy.abs', 'np.abs', (['(IPD - tIPD)'], {}), '(IPD - tIPD)\n', (5239, 5251), True, 'import numpy as np\n'), ((5730, 5755), 'numpy.mean', 'np.mean', (['logMagPhaseError'], {}), '(logMagPhaseError)\n', (5737, 5755), True, 'import numpy as np\n'), ((5960, 5982), 'numpy.mean', 'np.mean', (['magPhaseError'], {}), '(magPhaseError)\n', (5967, 5982), True, 'import numpy as np\n'), ((3151, 3190), 'torch.sum', 'torch.sum', (['(logMagM * FtBias)'], {'dim': '[2, 3]'}), '(logMagM * FtBias, dim=[2, 3])\n', (3160, 3190), False, 'import torch\n'), ((3189, 3219), 'torch.sum', 'torch.sum', (['logMagM'], {'dim': '[2, 3]'}), '(logMagM, dim=[2, 3])\n', (3198, 3219), False, 'import torch\n'), ((3448, 3484), 'torch.sum', 'torch.sum', (['(magM * FtBias)'], {'dim': '[2, 3]'}), '(magM * FtBias, dim=[2, 3])\n', (3457, 3484), False, 'import torch\n'), ((3483, 3510), 'torch.sum', 'torch.sum', (['magM'], {'dim': '[2, 3]'}), '(magM, dim=[2, 3])\n', (3492, 3510), False, 'import torch\n'), ((5555, 5597), 'numpy.sum', 'np.sum', (['(logMagM * phase_error)'], {'axis': '(2, 3)'}), '(logMagM * phase_error, axis=(2, 3))\n', (5561, 5597), True, 'import numpy as np\n'), ((5596, 5624), 'numpy.sum', 'np.sum', (['logMagM'], {'axis': '(2, 3)'}), '(logMagM, axis=(2, 3))\n', (5602, 5624), 
True, 'import numpy as np\n'), ((5868, 5907), 'numpy.sum', 'np.sum', (['(magM * phase_error)'], {'axis': '(2, 3)'}), '(magM * phase_error, axis=(2, 3))\n', (5874, 5907), True, 'import numpy as np\n'), ((5906, 5931), 'numpy.sum', 'np.sum', (['magM'], {'axis': '(2, 3)'}), '(magM, axis=(2, 3))\n', (5912, 5931), True, 'import numpy as np\n'), ((2367, 2380), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (2377, 2380), False, 'import torch\n')] |
import numpy as np
import cv2
def obj_read(mesh,texture_normal=None):
obj_file = open(mesh,'r')
vertices = []
vertex_colors = []
vertex_color_uv = []
faces = []
vertex_text_map = []
new_vertex_color_uv = []
vertex_colors_normal = []
vertex_no = 0;
vertex_text_no = 0;
faces_no = 0;
for line in obj_file:
if line[0] == "#" or line == "":
continue;
subline = line.split()
if subline == []:
continue;
if subline[0] == "v":
vertices.append([float(subline[1]), float(subline[2]), float(subline[3])])
if len(subline)>4:
vertex_colors.append([float(subline[4]), float(subline[5]), float(subline[6])])
vertex_no = vertex_no+1
elif subline[0] == "vt":
vertex_color_uv.append([float(subline[1]), float(subline[2])])
vertex_text_no = vertex_text_no + 1
elif subline[0] == "f":
sub1 = subline[1].split('/')
sub2 = subline[2].split('/')
sub3 = subline[3].split('/')
faces.append([int(float(sub1[0]))-1,int(float(sub2[0]))-1,int(float(sub3[0]))-1])
if len(sub1) > 1:
if not sub1[1] == "":
if len(sub1)>1:
vertex_text_map.append([int(sub1[0])-1,int(sub1[1])-1])
if len(sub2)>1:
vertex_text_map.append([int(sub2[0])-1,int(sub2[1])-1])
if len(sub3)>1:
vertex_text_map.append([int(sub3[0])-1,int(sub3[1])-1])
faces_no = faces_no + 1
obj_file.close()
vertices = np.array(vertices)
faces = np.array(faces)
vertex_colors = np.array(vertex_colors)
if len(vertex_color_uv)>0:
vertex_color_uv = np.array(vertex_color_uv)
new_vertex_color_uv = np.zeros((vertex_color_uv.shape[0],vertex_color_uv.shape[1]))
if len(vertex_text_map)>0:
vertex_text_map = np.array(vertex_text_map)
for i in range(vertex_text_map.shape[0]):
new_vertex_color_uv[vertex_text_map[i,0],:] = vertex_color_uv[vertex_text_map[i,1],:]
if texture_normal is not None:
vertex_colors_normal = fetch_colors(cv2.imread(texture_normal),new_vertex_color_uv)
vertex_colors_normal = vertex_colors_normal[0:vertices.shape[0],:]
return vertices, vertex_colors, faces, new_vertex_color_uv, vertex_colors_normal
def read_all_obj(mesh,texture_normal=None):
obj_file = open(mesh,'r')
vertices = []
vertex_colors = []
vertex_color_uv = []
vertex_normal = []
faces = []
facesText = []
facesNorm = []
otherLines = []
vertex_no = 0;
vertex_norm_no = 0;
vertex_text_no = 0;
faces_no = 0;
for line in obj_file:
if not len(line) == 1:
subline = line.split()
else:
subline = line
continue
if subline[0] == "v":
vertices.append([float(subline[1]), float(subline[2]), float(subline[3])])
if len(subline)>4:
vertex_colors.append([float(subline[4]), float(subline[5]), float(subline[6])])
vertex_no = vertex_no+1
elif subline[0] == "vn":
vertex_normal.append([float(subline[1]), float(subline[2]), float(subline[3])])
vertex_norm_no = vertex_norm_no + 1
elif subline[0] == "vt":
vertex_color_uv.append([float(subline[1]), float(subline[2])])
vertex_text_no = vertex_text_no + 1
elif subline[0] == "f":
sub1 = subline[1].split('/')
sub2 = subline[2].split('/')
sub3 = subline[3].split('/')
faces.append([int(sub1[0])-1,int(sub2[0])-1,int(sub3[0])-1])
facesText.append([int(sub1[1])-1,int(sub2[1])-1,int(sub3[1])-1])
facesNorm.append([int(sub1[2])-1,int(sub2[2])-1,int(sub3[2])-1])
faces_no = faces_no + 1
else:
otherLines.append(line)
obj_file.close()
vertices = np.array(vertices)
vertex_colors = np.array(vertex_colors)
vertex_normal = np.array(vertex_normal)
vertex_color_uv = np.array(vertex_color_uv)
faces = np.array(faces)
facesText = np.array(facesText)
facesNorm = np.array(facesNorm)
if vertex_color_uv.shape[0] == 0:
print(vertex_color_uv.shape)
raise Exception("Error Reading Obj")
new_vertex_color_uv = vertex_color_uv
return vertices, vertex_color_uv, vertex_normal, faces, facesText, facesNorm, otherLines
def obj_write(filename, vertices, uvs=None, normals=None, faces=None, facesText=None, facesNorm=None, otherLines=None):
meshfile = open(filename,'w+')
if otherLines is not None:
for i in range(len(otherLines)):
writestr = otherLines[i]
writestr = writestr + "\n"
meshfile.write(writestr)
for i in range(vertices.shape[0]):
writestr = "v"
for j in range(vertices.shape[1]):
writestr = writestr + " " + str(vertices[i,j])
writestr = writestr + "\n"
meshfile.write(writestr)
if normals is not None:
for i in range(normals.shape[0]):
writestr = "vn"
for j in range(normals.shape[1]):
writestr = writestr + " " + str(normals[i,j])
writestr = writestr + "\n"
meshfile.write(writestr)
if uvs is not None:
for i in range(uvs.shape[0]):
writestr = "vt"
for j in range(uvs.shape[1]):
writestr = writestr + " " + str(uvs[i,j])
writestr = writestr + "\n"
meshfile.write(writestr)
if faces is not None:
for i in range(faces.shape[0]):
writestr = "f"
flag = 0
for j in range(faces.shape[1]):
if faces[i,j] == -1:
flag = 1
if (facesText is not None) and (facesNorm is not None):
writestr = writestr + " " + str(faces[i,j] + 1)+ "/" + str(facesText[i,j] + 1) + "/" + str(facesNorm[i,j] + 1)
else:
writestr = writestr + " " + str(faces[i,j] + 1)
writestr = writestr + "\n"
if flag == 0:
meshfile.write(writestr)
meshfile.close()
def fetch_colors(texture, uvs):
rows = texture.shape[0]
cols = texture.shape[1]
#get the colors of each vertices
# uv starts from bottom row first column
# rows is y/v and columns is x/u
u = np.clip(np.round_(rows*(uvs[:,0])).astype('int'),0,rows-1)
v = np.clip(np.round_(cols*(1-uvs[:,1])).astype('int'),0,cols-1)
# if u >= cols:
# print("Bad U")
# u = cols-1
# if v >= rows:
# print("Bad V")
# v = rows-1
colors = np.flip(texture[v,u],axis=1)
# rows = texture.shape[0]
# cols = texture.shape[1]
# colors = np.zeros((uvs.shape[0],3))
# #get the colors of each vertices
# for i in range(uvs.shape[0]):
# # uv starts from bottom row first column
# # rows is y/v and columns is x/u
# u = int(rows*(uvs[i,0]))
# v = int(cols*(1-uvs[i,1]))
# if u >= cols:
# print("Bad U")
# u = cols-1
# if v >= rows:
# print("Bad V")
# v = rows-1
# colors[i,:] = np.flip(texture[v,u])
return colors | [
"numpy.flip",
"numpy.zeros",
"cv2.imread",
"numpy.array",
"numpy.round_"
] | [((1711, 1729), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (1719, 1729), True, 'import numpy as np\n'), ((1743, 1758), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (1751, 1758), True, 'import numpy as np\n'), ((1780, 1803), 'numpy.array', 'np.array', (['vertex_colors'], {}), '(vertex_colors)\n', (1788, 1803), True, 'import numpy as np\n'), ((4139, 4157), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (4147, 4157), True, 'import numpy as np\n'), ((4179, 4202), 'numpy.array', 'np.array', (['vertex_colors'], {}), '(vertex_colors)\n', (4187, 4202), True, 'import numpy as np\n'), ((4224, 4247), 'numpy.array', 'np.array', (['vertex_normal'], {}), '(vertex_normal)\n', (4232, 4247), True, 'import numpy as np\n'), ((4271, 4296), 'numpy.array', 'np.array', (['vertex_color_uv'], {}), '(vertex_color_uv)\n', (4279, 4296), True, 'import numpy as np\n'), ((4310, 4325), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (4318, 4325), True, 'import numpy as np\n'), ((4343, 4362), 'numpy.array', 'np.array', (['facesText'], {}), '(facesText)\n', (4351, 4362), True, 'import numpy as np\n'), ((4380, 4399), 'numpy.array', 'np.array', (['facesNorm'], {}), '(facesNorm)\n', (4388, 4399), True, 'import numpy as np\n'), ((6988, 7018), 'numpy.flip', 'np.flip', (['texture[v, u]'], {'axis': '(1)'}), '(texture[v, u], axis=1)\n', (6995, 7018), True, 'import numpy as np\n'), ((1863, 1888), 'numpy.array', 'np.array', (['vertex_color_uv'], {}), '(vertex_color_uv)\n', (1871, 1888), True, 'import numpy as np\n'), ((1920, 1982), 'numpy.zeros', 'np.zeros', (['(vertex_color_uv.shape[0], vertex_color_uv.shape[1])'], {}), '((vertex_color_uv.shape[0], vertex_color_uv.shape[1]))\n', (1928, 1982), True, 'import numpy as np\n'), ((2041, 2066), 'numpy.array', 'np.array', (['vertex_text_map'], {}), '(vertex_text_map)\n', (2049, 2066), True, 'import numpy as np\n'), ((2298, 2324), 'cv2.imread', 'cv2.imread', (['texture_normal'], {}), '(texture_normal)\n', 
(2308, 2324), False, 'import cv2\n'), ((6715, 6742), 'numpy.round_', 'np.round_', (['(rows * uvs[:, 0])'], {}), '(rows * uvs[:, 0])\n', (6724, 6742), True, 'import numpy as np\n'), ((6783, 6816), 'numpy.round_', 'np.round_', (['(cols * (1 - uvs[:, 1]))'], {}), '(cols * (1 - uvs[:, 1]))\n', (6792, 6816), True, 'import numpy as np\n')] |
# Preppin' Data 2021 Week 25
import pandas as pd
import numpy as np
# Load data
gen_1 = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Gen 1')
evolution_group = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Evolution Group')
evolutions = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Evolutions')
mega_evolutions = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Mega Evolutions')
alolan = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Alolan')
galarian = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Galarian')
gigantamax = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Gigantamax')
unattainable = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Unattainable in Sword & Shield')
anime = pd.read_excel('unprepped_data\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx', sheet_name='Anime Appearances')
# Clean up the list of Gen 1 Pokémon so we have 1 row per Pokémon
gen_1 = gen_1.loc[gen_1.Name.notnull()]
gen_1['#'] = np.int64(gen_1['#'])
# Clean up the Evolution Group input so that we can join it to the Gen 1 list
evolution_group['#'] = np.int64(evolution_group['#'])
evol_lookup = evolution_group[['Evolution Group','#']]
evolution_group_df = evolution_group
del evolution_group_df['Evolution Group']
evolution_group_df = evolution_group_df.drop_duplicates()
gen_1_df = pd.merge(gen_1,evolution_group_df, on = '#', how = 'inner')
# Filter out Starter and Legendary Pokémon
gen_1_df = gen_1_df.loc[gen_1_df['Starter?'] == 0]
gen_1_df = gen_1_df.loc[gen_1_df['Legendary?'] == 0]
# Using the Evolutions input, exclude any Pokémon that evolves from a Pokémon that is not part of Gen 1 or can evolve into a Pokémon outside of Gen 1
evolutions_df = evolutions[evolutions['Evolving to'].isin(gen_1_df['Name'])]
evolutions_df = evolutions_df[evolutions_df['Evolving from'].isin(gen_1_df['Name'])]
# create list of evolving to and from
keep = list(evolutions_df['Evolving from'])
keep_2 = list(evolutions_df['Evolving to'])
keep.extend(keep_2)
keep = list(set(keep))
gen_1_df = gen_1_df[gen_1_df['Name'].isin(keep)]
# Exclude any Pokémon with a mega evolution, Alolan, Galarian or Gigantamax form
exclude_df = pd.concat([mega_evolutions,alolan,galarian,gigantamax])
exclude_df['Name'] = exclude_df['Name'].str.replace('^(Mega|Alolan|Galarian|Gigantamax) ','',regex = True)
exclude_df['Name'] = exclude_df['Name'].str.replace(' (X|Y)$','',regex = True)
# find any pokemon that evolves into a mega / gigantamax
id_lookup = gen_1_df[['#','Name']]
exclude_df = pd.merge(exclude_df,id_lookup, on = 'Name', how = 'inner')
exclude_df = pd.merge(exclude_df,evol_lookup, on = '#', how = 'inner')
del exclude_df['#']
exclude_df = pd.merge(exclude_df,evol_lookup, on = 'Evolution Group', how = 'inner')
# exclude pokemon that can mega evolve etc.
gen_1_df = gen_1_df[~gen_1_df['#'].isin(exclude_df['#'])]
# It's not possible to catch certain Pokémon in the most recent games. These are the only ones we will consider from this point on
gen_1_df = gen_1_df[gen_1_df['Name'].isin(list(unattainable['Name']))]
# We're left with 10 evolution groups. Rank them in ascending order of how many times they've appeared in the anime to see who the worst Pokémon is!
# convert anime to evolution groups
anime_df = pd.merge(anime,id_lookup, left_on = 'Pokemon', right_on = 'Name', how = 'inner')
anime_df = pd.merge(anime_df,evol_lookup, on = '#', how = 'inner')
# count appearances
appearences = anime_df[anime_df['Evolution Group'].isin(list(gen_1_df['Name']))]
appearences = appearences[['Evolution Group','Episode']].drop_duplicates()
appearences = appearences.groupby(['Evolution Group'],as_index=False).count()
appearences.columns = ['Evolution Group','Appearances']
# create rank
appearences['Worst Pokémon'] = appearences['Appearances'].rank(ascending=True)
appearences['Worst Pokémon'] = appearences['Worst Pokémon'].astype(int)
# Output the data
appearences = appearences.sort_values(by='Worst Pokémon', ascending=True).reset_index()
appearences = appearences[['Worst Pokémon','Evolution Group','Appearances']]
appearences.to_csv('prepped_data\\PD 2021 Wk 25 Output.csv', encoding="utf-8-sig", index=False)
print("data prepped!")
| [
"pandas.read_excel",
"pandas.merge",
"pandas.concat",
"numpy.int64"
] | [((89, 186), 'pandas.read_excel', 'pd.read_excel', (['"""unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx"""'], {'sheet_name': '"""Gen 1"""'}), "('unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx',\n sheet_name='Gen 1')\n", (102, 186), True, 'import pandas as pd\n'), ((201, 308), 'pandas.read_excel', 'pd.read_excel', (['"""unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx"""'], {'sheet_name': '"""Evolution Group"""'}), "('unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx',\n sheet_name='Evolution Group')\n", (214, 308), True, 'import pandas as pd\n'), ((318, 420), 'pandas.read_excel', 'pd.read_excel', (['"""unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx"""'], {'sheet_name': '"""Evolutions"""'}), "('unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx',\n sheet_name='Evolutions')\n", (331, 420), True, 'import pandas as pd\n'), ((435, 542), 'pandas.read_excel', 'pd.read_excel', (['"""unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx"""'], {'sheet_name': '"""Mega Evolutions"""'}), "('unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx',\n sheet_name='Mega Evolutions')\n", (448, 542), True, 'import pandas as pd\n'), ((548, 646), 'pandas.read_excel', 'pd.read_excel', (['"""unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx"""'], {'sheet_name': '"""Alolan"""'}), "('unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx',\n sheet_name='Alolan')\n", (561, 646), True, 'import pandas as pd\n'), ((654, 754), 'pandas.read_excel', 'pd.read_excel', (['"""unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx"""'], {'sheet_name': '"""Galarian"""'}), "('unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx',\n sheet_name='Galarian')\n", (667, 754), True, 'import pandas as pd\n'), ((764, 866), 'pandas.read_excel', 'pd.read_excel', (['"""unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx"""'], {'sheet_name': '"""Gigantamax"""'}), "('unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 
Input.xlsx',\n sheet_name='Gigantamax')\n", (777, 866), True, 'import pandas as pd\n'), ((878, 1000), 'pandas.read_excel', 'pd.read_excel', (['"""unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx"""'], {'sheet_name': '"""Unattainable in Sword & Shield"""'}), "('unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx',\n sheet_name='Unattainable in Sword & Shield')\n", (891, 1000), True, 'import pandas as pd\n'), ((1005, 1114), 'pandas.read_excel', 'pd.read_excel', (['"""unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx"""'], {'sheet_name': '"""Anime Appearances"""'}), "('unprepped_data\\\\PD 2021 Wk 25 Input - 2021W25 Input.xlsx',\n sheet_name='Anime Appearances')\n", (1018, 1114), True, 'import pandas as pd\n'), ((1231, 1251), 'numpy.int64', 'np.int64', (["gen_1['#']"], {}), "(gen_1['#'])\n", (1239, 1251), True, 'import numpy as np\n'), ((1355, 1385), 'numpy.int64', 'np.int64', (["evolution_group['#']"], {}), "(evolution_group['#'])\n", (1363, 1385), True, 'import numpy as np\n'), ((1591, 1647), 'pandas.merge', 'pd.merge', (['gen_1', 'evolution_group_df'], {'on': '"""#"""', 'how': '"""inner"""'}), "(gen_1, evolution_group_df, on='#', how='inner')\n", (1599, 1647), True, 'import pandas as pd\n'), ((2427, 2485), 'pandas.concat', 'pd.concat', (['[mega_evolutions, alolan, galarian, gigantamax]'], {}), '([mega_evolutions, alolan, galarian, gigantamax])\n', (2436, 2485), True, 'import pandas as pd\n'), ((2776, 2831), 'pandas.merge', 'pd.merge', (['exclude_df', 'id_lookup'], {'on': '"""Name"""', 'how': '"""inner"""'}), "(exclude_df, id_lookup, on='Name', how='inner')\n", (2784, 2831), True, 'import pandas as pd\n'), ((2848, 2902), 'pandas.merge', 'pd.merge', (['exclude_df', 'evol_lookup'], {'on': '"""#"""', 'how': '"""inner"""'}), "(exclude_df, evol_lookup, on='#', how='inner')\n", (2856, 2902), True, 'import pandas as pd\n'), ((2939, 3007), 'pandas.merge', 'pd.merge', (['exclude_df', 'evol_lookup'], {'on': '"""Evolution Group"""', 'how': 
'"""inner"""'}), "(exclude_df, evol_lookup, on='Evolution Group', how='inner')\n", (2947, 3007), True, 'import pandas as pd\n'), ((3515, 3590), 'pandas.merge', 'pd.merge', (['anime', 'id_lookup'], {'left_on': '"""Pokemon"""', 'right_on': '"""Name"""', 'how': '"""inner"""'}), "(anime, id_lookup, left_on='Pokemon', right_on='Name', how='inner')\n", (3523, 3590), True, 'import pandas as pd\n'), ((3607, 3659), 'pandas.merge', 'pd.merge', (['anime_df', 'evol_lookup'], {'on': '"""#"""', 'how': '"""inner"""'}), "(anime_df, evol_lookup, on='#', how='inner')\n", (3615, 3659), True, 'import pandas as pd\n')] |
import numpy as np
import pytest
from pydantic import ValidationError
from bigearthnet_patch_interface.band_interface import *
def test_band_shape_validator():
with pytest.raises(ValueError):
Band(name="B01", spatial_resolution=10, data=np.array([1]), data_shape=((2, 1)))
@pytest.mark.parametrize(
"invalid_data",
[
[[1]],
((1)),
1,
"1",
],
)
def test_data_validation(invalid_data):
with pytest.raises(ValidationError):
Band(name="B01", spatial_resolution=10, data=invalid_data, data_shape=(1, 1))
def test_str_representation():
name = "B01"
sp = 10
b = Band(name=name, spatial_resolution=sp, data=np.array([[1]]), data_shape=(1, 1))
assert name in str(b) and str(sp) in str(b)
@pytest.mark.parametrize(
"invalid_name",
[
"wrong_name",
"B09",
"B01",
"B8A",
],
)
def test_10m_name_validation(invalid_name):
with pytest.raises(ValueError):
BenS2_10mBand(name=invalid_name)
@pytest.mark.parametrize(
"invalid_name",
[
"wrong_name",
"B02",
"B03",
"B09",
],
)
def test_20m_name_validation(invalid_name):
with pytest.raises(ValueError):
BenS2_20mBand(name=invalid_name)
@pytest.mark.parametrize(
"invalid_name",
[
"wrong_name",
"B01",
"B02",
"B11",
],
)
def test_10m_name_validation(invalid_name):
with pytest.raises(ValueError):
BenS2_60mBand(name=invalid_name)
@pytest.mark.parametrize(
"invalid_name",
[
"wrong_name",
"B01",
"B02",
"B06",
],
)
def test_s1_name_validation(invalid_name):
with pytest.raises(ValueError):
BenS1_Band(name=invalid_name)
@pytest.mark.parametrize(
"invalid_name",
[
"wrong_name",
"B01",
"B02",
"B06",
],
)
def test_s1_name_validation(invalid_name):
with pytest.raises(ValueError):
BenS1_Band(name=invalid_name)
| [
"pytest.mark.parametrize",
"pytest.raises",
"numpy.array"
] | [((291, 350), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid_data"""', "[[[1]], 1, 1, '1']"], {}), "('invalid_data', [[[1]], 1, 1, '1'])\n", (314, 350), False, 'import pytest\n'), ((773, 849), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid_name"""', "['wrong_name', 'B09', 'B01', 'B8A']"], {}), "('invalid_name', ['wrong_name', 'B09', 'B01', 'B8A'])\n", (796, 849), False, 'import pytest\n'), ((1024, 1100), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid_name"""', "['wrong_name', 'B02', 'B03', 'B09']"], {}), "('invalid_name', ['wrong_name', 'B02', 'B03', 'B09'])\n", (1047, 1100), False, 'import pytest\n'), ((1275, 1351), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid_name"""', "['wrong_name', 'B01', 'B02', 'B11']"], {}), "('invalid_name', ['wrong_name', 'B01', 'B02', 'B11'])\n", (1298, 1351), False, 'import pytest\n'), ((1526, 1602), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid_name"""', "['wrong_name', 'B01', 'B02', 'B06']"], {}), "('invalid_name', ['wrong_name', 'B01', 'B02', 'B06'])\n", (1549, 1602), False, 'import pytest\n'), ((1773, 1849), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""invalid_name"""', "['wrong_name', 'B01', 'B02', 'B06']"], {}), "('invalid_name', ['wrong_name', 'B01', 'B02', 'B06'])\n", (1796, 1849), False, 'import pytest\n'), ((172, 197), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (185, 197), False, 'import pytest\n'), ((454, 484), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (467, 484), False, 'import pytest\n'), ((953, 978), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (966, 978), False, 'import pytest\n'), ((1204, 1229), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1217, 1229), False, 'import pytest\n'), ((1455, 1480), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', 
(1468, 1480), False, 'import pytest\n'), ((1705, 1730), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1718, 1730), False, 'import pytest\n'), ((1952, 1977), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1965, 1977), False, 'import pytest\n'), ((686, 701), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (694, 701), True, 'import numpy as np\n'), ((252, 265), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (260, 265), True, 'import numpy as np\n')] |
from classes import Point, Cluster
import serial
import json
import pandas as pd
import numpy as np
import time
import glob
import matplotlib.pyplot as plt
import re
def find_serial_port(man=""):
if len(man) != 0:
return man
ports = glob.glob("/dev/tty.usb*")
print(ports)
if len(ports) > 1:
print("More than 1 port found")
for port in ports:
if "1434" in port:
return port
return ports[0]
# d = {key : [] for key in range(1, NUM_NODES + 1)} #empty dictionary that contains id of mobile nodes
d = {1 : [], 2 : [], 4 : [], 5 : [], 7321 : []}
file_path = "knn(2,1).csv"
def save_data(json_out, lim=150):
"""
for ML training
"""
# global d
id_list = [int(d) for d in str(json_out["static_ids"])]
rssi_list = [[pkg["rssi"]] for pkg in json_out['data']]
for i, key in enumerate(id_list):
d[key].append(rssi_list[i][0])
sts = all([len(d[key]) >= lim for key in d])
if sts is True: #check if all nodes have enough data
#skim all columns to the same size
for key in d:
d[key] = d[key][:lim]
df = pd.DataFrame(data=d)
df.to_csv(file_path, index=False)
print("FILE WRITTEN, CLOSING SERIAL COMM...")
return False
return True
def json_process(json_out):
data = json_out["frame"]
Xs = [i['x'] for i in data]
Ys = [i['y'] for i in data]
cluster_in = np.column_stack((Xs, Ys))
cluster = Cluster(cluster_in, eps=0.35, min_samples=3)
cluster.plot(fig=plt)
# plt.scatter(Xs, Ys)
plt.xlim(-10, 10)
plt.ylim(-0.9, 18)
# plt.set_xbound(lower=xmin, upper=xmax)
# plt.set_ybound(lower=ymin, upper=ymax)
plt.pause(0.00000001)
plt.clf()
# grp = []
# for pnt in data:
# p = Point(x=pnt['x'], y=pnt['y'])
# grp.append(p)
if __name__ == "__main__":
# 7-bit C1 ANSI sequences
ansi_escape = re.compile(r'''
\x1B # ESC
(?: # 7-bit C1 Fe (except CSI)
[@-Z\\-_]
| # or [ for CSI, followed by a control sequence
\[
[0-?]* # Parameter bytes
[ -/]* # Intermediate bytes
[@-~] # Final byte
)
''', re.VERBOSE)
serial_conn = serial.Serial(port=find_serial_port(man=""), baudrate = 115200) #SensorTag
plt.figure()
def read_loop(skip=1): #skip every {skip} values, 1 is no skip
json_ready = False
recv_cnt = 0
read_data = True
while read_data:
try:
byte_data = serial_conn.readline()
recv_cnt += 1
if recv_cnt % skip != 0:
continue
if recv_cnt == skip:
recv_cnt = 0
data = byte_data.decode('utf-8')
tmp_data = data.strip()
data = ''.join(tmp_data)
# print(data)
if not json_ready:
if "[JS_GUD]" in data:
json_ready = True
else:
data = ansi_escape.sub('', data)
json_out = json.loads(data)
json_process(json_out)
# read_data = save_data(json_out)
json_ready = False
except Exception as e:
json_ready = False
print("Exception:", e)
pass
# close serial port
print("close serial port")
serial_conn.close()
for i in range(3):
print("COUNT DOWN ", 3 - i)
time.sleep(0.1)
print("STARTING...")
read_loop()
| [
"pandas.DataFrame",
"matplotlib.pyplot.xlim",
"json.loads",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.clf",
"time.sleep",
"matplotlib.pyplot.figure",
"classes.Cluster",
"numpy.column_stack",
"glob.glob",
"matplotlib.pyplot.pause",
"re.compile"
] | [((251, 277), 'glob.glob', 'glob.glob', (['"""/dev/tty.usb*"""'], {}), "('/dev/tty.usb*')\n", (260, 277), False, 'import glob\n'), ((1445, 1470), 'numpy.column_stack', 'np.column_stack', (['(Xs, Ys)'], {}), '((Xs, Ys))\n', (1460, 1470), True, 'import numpy as np\n'), ((1485, 1529), 'classes.Cluster', 'Cluster', (['cluster_in'], {'eps': '(0.35)', 'min_samples': '(3)'}), '(cluster_in, eps=0.35, min_samples=3)\n', (1492, 1529), False, 'from classes import Point, Cluster\n'), ((1586, 1603), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-10)', '(10)'], {}), '(-10, 10)\n', (1594, 1603), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1626), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.9)', '(18)'], {}), '(-0.9, 18)\n', (1616, 1626), True, 'import matplotlib.pyplot as plt\n'), ((1721, 1737), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-08)'], {}), '(1e-08)\n', (1730, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1747, 1756), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1754, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1940, 2270), 're.compile', 're.compile', (['"""\n \\\\x1B # ESC\n (?: # 7-bit C1 Fe (except CSI)\n [@-Z\\\\\\\\-_]\n | # or [ for CSI, followed by a control sequence\n \\\\[\n [0-?]* # Parameter bytes\n [ -/]* # Intermediate bytes\n [@-~] # Final byte\n )\n """', 're.VERBOSE'], {}), '(\n """\n \\\\x1B # ESC\n (?: # 7-bit C1 Fe (except CSI)\n [@-Z\\\\\\\\-_]\n | # or [ for CSI, followed by a control sequence\n \\\\[\n [0-?]* # Parameter bytes\n [ -/]* # Intermediate bytes\n [@-~] # Final byte\n )\n """\n , re.VERBOSE)\n', (1950, 2270), False, 'import re\n'), ((2356, 2368), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2366, 2368), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1171), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd'}), '(data=d)\n', (1163, 1171), True, 'import pandas as pd\n'), ((3632, 3647), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3642, 3647), False, 'import time\n'), 
((3188, 3204), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (3198, 3204), False, 'import json\n')] |
"""
Tests for the conversion module.
"""
import unittest
import numpy as np
from vlnm.conversion import (
hz_to_bark,
hz_to_erb,
hz_to_mel)
class TestHzToBark(unittest.TestCase):
"""
Test the hz_to_bark function.
"""
def setUp(self):
self.convert = lambda frq: 26.81 * frq / (frq + 1960) - 0.53
def test_hz_to_bark_number(self):
"""Test single number."""
data = np.random.randn(1)[0]
expected = self.convert(data)
actual = hz_to_bark(data)
self.assertEqual(expected, actual)
def test_hz_to_bark_vector(self):
"""Test vector."""
data = np.random.randn(100)
expected = self.convert(data)
actual = hz_to_bark(data)
self.assertTrue(np.array_equal(expected, actual))
def test_hz_to_bark_matrix(self):
"""Test matrix."""
data = np.random.randn(3, 100)
expected = self.convert(data)
actual = hz_to_bark(data)
self.assertTrue(np.array_equal(expected, actual))
class TestHzToErb(unittest.TestCase):
"""
Test the hz_to_erb function.
"""
def setUp(self):
self.convert = lambda frq: 21.4 * np.log(1 + 0.00437 * frq)
def test_hz_to_erb_number(self):
"""Test single number."""
data = np.random.randn(1)[0]
expected = self.convert(data)
actual = hz_to_erb(data)
self.assertEqual(expected, actual)
def test_hz_to_erb_vector(self):
"""Test vector."""
data = np.random.randn(100)
expected = self.convert(data)
actual = hz_to_erb(data)
self.assertTrue(np.array_equal(expected, actual))
def test_hz_to_erb_matrix(self):
"""Test matrix."""
data = np.random.randn(3, 100)
expected = self.convert(data)
actual = hz_to_erb(data)
self.assertTrue(np.array_equal(expected, actual))
class TestHzToMel(unittest.TestCase):
"""
Test the hz_to_mel function.
"""
def setUp(self):
self.convert = lambda frq: 1127. * np.log(1. + frq / 700.)
def test_hz_to_mel_number(self):
"""Test single number."""
data = np.random.randn(1)[0]
expected = self.convert(data)
actual = hz_to_mel(data)
self.assertEqual(expected, actual)
def test_hz_to_mel_vector(self):
"""Test vector."""
data = np.random.randn(100)
expected = self.convert(data)
actual = hz_to_mel(data)
self.assertTrue(np.array_equal(expected, actual))
def test_hz_to_mel_matrix(self):
"""Test matrix."""
data = np.random.randn(3, 100)
expected = self.convert(data)
actual = hz_to_mel(data)
self.assertTrue(np.array_equal(expected, actual))
| [
"numpy.log",
"numpy.random.randn",
"vlnm.conversion.hz_to_mel",
"numpy.array_equal",
"vlnm.conversion.hz_to_erb",
"vlnm.conversion.hz_to_bark"
] | [((502, 518), 'vlnm.conversion.hz_to_bark', 'hz_to_bark', (['data'], {}), '(data)\n', (512, 518), False, 'from vlnm.conversion import hz_to_bark, hz_to_erb, hz_to_mel\n'), ((643, 663), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (658, 663), True, 'import numpy as np\n'), ((719, 735), 'vlnm.conversion.hz_to_bark', 'hz_to_bark', (['data'], {}), '(data)\n', (729, 735), False, 'from vlnm.conversion import hz_to_bark, hz_to_erb, hz_to_mel\n'), ((875, 898), 'numpy.random.randn', 'np.random.randn', (['(3)', '(100)'], {}), '(3, 100)\n', (890, 898), True, 'import numpy as np\n'), ((954, 970), 'vlnm.conversion.hz_to_bark', 'hz_to_bark', (['data'], {}), '(data)\n', (964, 970), False, 'from vlnm.conversion import hz_to_bark, hz_to_erb, hz_to_mel\n'), ((1372, 1387), 'vlnm.conversion.hz_to_erb', 'hz_to_erb', (['data'], {}), '(data)\n', (1381, 1387), False, 'from vlnm.conversion import hz_to_bark, hz_to_erb, hz_to_mel\n'), ((1511, 1531), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (1526, 1531), True, 'import numpy as np\n'), ((1587, 1602), 'vlnm.conversion.hz_to_erb', 'hz_to_erb', (['data'], {}), '(data)\n', (1596, 1602), False, 'from vlnm.conversion import hz_to_bark, hz_to_erb, hz_to_mel\n'), ((1741, 1764), 'numpy.random.randn', 'np.random.randn', (['(3)', '(100)'], {}), '(3, 100)\n', (1756, 1764), True, 'import numpy as np\n'), ((1820, 1835), 'vlnm.conversion.hz_to_erb', 'hz_to_erb', (['data'], {}), '(data)\n', (1829, 1835), False, 'from vlnm.conversion import hz_to_bark, hz_to_erb, hz_to_mel\n'), ((2236, 2251), 'vlnm.conversion.hz_to_mel', 'hz_to_mel', (['data'], {}), '(data)\n', (2245, 2251), False, 'from vlnm.conversion import hz_to_bark, hz_to_erb, hz_to_mel\n'), ((2375, 2395), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (2390, 2395), True, 'import numpy as np\n'), ((2451, 2466), 'vlnm.conversion.hz_to_mel', 'hz_to_mel', (['data'], {}), '(data)\n', (2460, 2466), False, 'from vlnm.conversion 
import hz_to_bark, hz_to_erb, hz_to_mel\n'), ((2605, 2628), 'numpy.random.randn', 'np.random.randn', (['(3)', '(100)'], {}), '(3, 100)\n', (2620, 2628), True, 'import numpy as np\n'), ((2684, 2699), 'vlnm.conversion.hz_to_mel', 'hz_to_mel', (['data'], {}), '(data)\n', (2693, 2699), False, 'from vlnm.conversion import hz_to_bark, hz_to_erb, hz_to_mel\n'), ((425, 443), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (440, 443), True, 'import numpy as np\n'), ((760, 792), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (774, 792), True, 'import numpy as np\n'), ((995, 1027), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (1009, 1027), True, 'import numpy as np\n'), ((1295, 1313), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (1310, 1313), True, 'import numpy as np\n'), ((1627, 1659), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (1641, 1659), True, 'import numpy as np\n'), ((1860, 1892), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (1874, 1892), True, 'import numpy as np\n'), ((2159, 2177), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (2174, 2177), True, 'import numpy as np\n'), ((2491, 2523), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (2505, 2523), True, 'import numpy as np\n'), ((2724, 2756), 'numpy.array_equal', 'np.array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (2738, 2756), True, 'import numpy as np\n'), ((1182, 1207), 'numpy.log', 'np.log', (['(1 + 0.00437 * frq)'], {}), '(1 + 0.00437 * frq)\n', (1188, 1207), True, 'import numpy as np\n'), ((2048, 2073), 'numpy.log', 'np.log', (['(1.0 + frq / 700.0)'], {}), '(1.0 + frq / 700.0)\n', (2054, 2073), True, 'import numpy as np\n')] |
import numpy as np
from msdsl import *
m = MixedSignalModel('rc')
dt = m.add_analog_input('dt')
alpha = m.add_analog_output('alpha')
func = lambda dt: np.exp(-dt)
f = m.make_function(func, domain=[0, 10], numel=512, order=1)
m.set_from_sync_func(alpha, f, dt)
m.compile_and_print(VerilogGenerator()) | [
"numpy.exp"
] | [((151, 162), 'numpy.exp', 'np.exp', (['(-dt)'], {}), '(-dt)\n', (157, 162), True, 'import numpy as np\n')] |
import numpy as np
class Statistics(object):
def __init__(self):
self.histogram_db = {}
def train(self, images):
for image in images:
local_image_id = image['img_id']
if local_image_id not in self.histogram_db.keys():
self.histogram_db[local_image_id] = []
self.histogram_db[local_image_id].append(np.histogram(np.array(image['img'])))
def predict(self, image):
min_score = -1
best_index = None
local_img = image['img']
local_target_hist = np.histogram(np.array(local_img))
for key in self.histogram_db.keys():
local_score = 0
for hist in self.histogram_db[key]:
local_score += np.linalg.norm(local_target_hist[0] - hist[0])
local_score += np.linalg.norm(local_target_hist[1] - hist[1])
if min_score == -1 or local_score < min_score:
min_score = local_score
best_index = key
return best_index
| [
"numpy.linalg.norm",
"numpy.array"
] | [((570, 589), 'numpy.array', 'np.array', (['local_img'], {}), '(local_img)\n', (578, 589), True, 'import numpy as np\n'), ((743, 789), 'numpy.linalg.norm', 'np.linalg.norm', (['(local_target_hist[0] - hist[0])'], {}), '(local_target_hist[0] - hist[0])\n', (757, 789), True, 'import numpy as np\n'), ((821, 867), 'numpy.linalg.norm', 'np.linalg.norm', (['(local_target_hist[1] - hist[1])'], {}), '(local_target_hist[1] - hist[1])\n', (835, 867), True, 'import numpy as np\n'), ((391, 413), 'numpy.array', 'np.array', (["image['img']"], {}), "(image['img'])\n", (399, 413), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Using Unicode everywhere 🤗
===========================
This example demonstrates how to include non-ASCII characters, mostly emoji 🎉
to stress test the build and test environments that parse the example files.
"""
from __future__ import unicode_literals
# 🎉 👍
# Code source: <NAME>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 20
plt.rcParams["font.monospace"] = ["DejaVu Sans Mono"]
plt.rcParams["font.family"] = "monospace"
plt.figure()
x = np.random.randn(100) * 2 + 1
y = np.random.randn(100) * 6 + 3
s = np.random.rand(*x.shape) * 800 + 500
plt.scatter(x, y, s, marker=r'$\oint$')
x = np.random.randn(60) * 7 - 4
y = np.random.randn(60) * 3 - 2
s = s[:x.size]
plt.scatter(x, y, s, alpha=0.5, c='g', marker=r'$\clubsuit$')
plt.xlabel('⇒')
plt.ylabel('⇒')
plt.title('♲' * 10)
print('Std out capture 😎')
# To avoid matplotlib text output
plt.show()
# %%
# Debug fonts
print(plt.rcParams)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.random.randn",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((517, 529), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (527, 529), True, 'import matplotlib.pyplot as plt\n'), ((637, 676), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y', 's'], {'marker': '"""$\\\\oint$"""'}), "(x, y, s, marker='$\\\\oint$')\n", (648, 676), True, 'import matplotlib.pyplot as plt\n'), ((756, 817), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y', 's'], {'alpha': '(0.5)', 'c': '"""g"""', 'marker': '"""$\\\\clubsuit$"""'}), "(x, y, s, alpha=0.5, c='g', marker='$\\\\clubsuit$')\n", (767, 817), True, 'import matplotlib.pyplot as plt\n'), ((818, 833), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""⇒"""'], {}), "('⇒')\n", (828, 833), True, 'import matplotlib.pyplot as plt\n'), ((834, 849), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""⇒"""'], {}), "('⇒')\n", (844, 849), True, 'import matplotlib.pyplot as plt\n'), ((850, 869), 'matplotlib.pyplot.title', 'plt.title', (["('♲' * 10)"], {}), "('♲' * 10)\n", (859, 869), True, 'import matplotlib.pyplot as plt\n'), ((931, 941), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (939, 941), True, 'import matplotlib.pyplot as plt\n'), ((534, 554), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (549, 554), True, 'import numpy as np\n'), ((567, 587), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (582, 587), True, 'import numpy as np\n'), ((600, 624), 'numpy.random.rand', 'np.random.rand', (['*x.shape'], {}), '(*x.shape)\n', (614, 624), True, 'import numpy as np\n'), ((681, 700), 'numpy.random.randn', 'np.random.randn', (['(60)'], {}), '(60)\n', (696, 700), True, 'import numpy as np\n'), ((713, 732), 'numpy.random.randn', 'np.random.randn', (['(60)'], {}), '(60)\n', (728, 732), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import os
import time
import torch
import random
import yaml
import gym
import csv
import multiprocessing as mp
from numba import njit
import scipy.stats as sps
import pickle
from concurrent.futures import ThreadPoolExecutor
from mpc import lattice_planner,pure_pursuit_utils
import sys
sys.path.append('./flows')
from maf import MAF
from group_maf import GroupMAF
@torch.no_grad()
def sampleFlow(model,obs,n_row,index):
model.eval()
u=model.base_dist.sample((n_row,1)).squeeze()
samples,_=model.inverse(u,torch.from_numpy(obs[index].astype(np.float32)))
return samples.numpy().astype(np.float64)
def getPlannerObs(obs):
ego_pose=[obs['poses_x'][EGO_IDX],obs['poses_y'][EGO_IDX],obs['poses_theta'][EGO_IDX]]
opp_pose=[obs['poses_x'][EGO_IDX+1],obs['poses_y'][EGO_IDX+1],obs['poses_theta'][EGO_IDX+1]]
ego_vel=obs['linear_vels_x'][EGO_IDX]
opp_vel=obs['linear_vels_x'][EGO_IDX+1]
return ego_pose,opp_pose,ego_vel,opp_vel
def getFlowObs(obs):
flow_obs=np.empty((NUM_AGENTS,COND_LABEL_SIZE))
for i in range(NUM_AGENTS):
flow_obs[i,:100]=np.take(obs['scans'][i],scan_sub_idx)/MAX_LIDAR
flow_obs[i,100]=obs['linear_vels_x'][i]/MAX_SPEED
return flow_obs
def unnormalizeFlow(grid):
grid[:,0]+=FLOW_S_SHIFT
grid[:,1]*=T_SCALE
grid[:,2]*=THETA_SCALE
grid[:,3:]*=V_SCALE
return grid
def normalizeFlow(grid):
grid[:,0]-=FLOW_S_SHIFT
grid[:,1]/=T_SCALE
grid[:,2]/=THETA_SCALE
grid[:,3:]/=V_SCALE
return grid
def getFlow(flow,flow_weights):
b=bytes(flow_weights.astype(np.uint8))
flow.load_state_dict(pickle.loads(b))
return flow
def load_raceline(file_path):
with open(file_path)as f:
waypoints=[tuple(line)for line in csv.reader(f)]
waypoints=np.array([(float(pt[0]),float(pt[1]),float(pt[2]),float(pt[3]),float(pt[4]),float(pt[5]))for pt in waypoints])
return waypoints
WAYPOINT_START_IDX=250
OPP_HEADSTART=1.5
SIDE_START=1.
def generateInitPoses(waypoints=None):
 """Compute the [ego, opponent] start poses as a (2, 3) array of (x, y, theta).

 Without waypoints the ego starts at the origin with the opponent
 OPP_HEADSTART ahead along the heading.  With waypoints, both cars start
 SIDE_START laterally off waypoint WAYPOINT_START_IDX on opposite sides,
 with the opponent OPP_HEADSTART further ahead.
 """
 if waypoints is None:
  ego_x, ego_y, ego_t = 0, 0, 0
  opp_x = ego_x + OPP_HEADSTART * np.cos(ego_t)
  opp_y = ego_y + OPP_HEADSTART * np.sin(ego_t)
  return np.array([[ego_x, ego_y, ego_t], [opp_x, opp_y, ego_t]])
 pt = waypoints[WAYPOINT_START_IDX]
 ego_t = pt[3]
 left = ego_t + np.pi / 2
 right = ego_t - np.pi / 2
 ego_x = pt[0] + SIDE_START * np.cos(left)
 ego_y = pt[1] + SIDE_START * np.sin(left)
 opp_x = pt[0] + SIDE_START * np.cos(right) + OPP_HEADSTART * np.cos(ego_t)
 opp_y = pt[1] + SIDE_START * np.sin(right) + OPP_HEADSTART * np.sin(ego_t)
 return np.array([[ego_x, ego_y, ego_t], [opp_x, opp_y, ego_t]])
# --- Flow sampling / normalization constants ---
NUM_FLOW_SAMPLES=50
# Shift/scale factors applied by the (un)normalizeFlow helpers.
FLOW_S_SHIFT=5.
THETA_SCALE=0.25
T_SCALE=0.75
V_SCALE=2.0
# --- Simulation layout ---
NUM_AGENTS=2
EGO_IDX=0
INPUT_SIZE=6
# Planner acts every PLANNER_DT; physics steps at PHYSICS_DT (10 substeps).
PLANNER_DT=0.1
PHYSICS_DT=0.01
# --- MAF flow architecture hyperparameters ---
N_BLOCKS=5
HIDDEN_SIZE=100
N_HIDDEN=1
COND_LABEL_SIZE=101
ACTIVATION_FCN='relu'
INPUT_ORDER='sequential'
BATCH_NORM=False
# --- Vehicle dynamics parameters passed to the gym env ---
mass=3.74
l_r=0.17145
I_z=0.04712
mu=0.523
h_cg=0.074
cs_f=4.718
cs_r=5.4562
# Normalization bounds for lidar range and speed in getFlowObs().
MAX_LIDAR=30.
MAX_SPEED=20.
# 100 evenly spaced beam indices subsampled from the 1080-beam scan.
scan_sub_idx=np.linspace(0,1079,100).astype(int)
# Command-line interface; ARGS is read as a module-level global throughout.
parser=argparse.ArgumentParser()
parser.add_argument('--result_npz_path',type=str,default='cost_weights.npz')
parser.add_argument('--update_belief',type=int,default=1,help='default 1')
parser.add_argument('--ball_size',type=float,default=0.1,help='default 0.1')
parser.add_argument('--ego_frozen_flow',type=int,default=0,help='default 0')
parser.add_argument('--eval_iters',type=int,default=1,help='default 1')
parser.add_argument('--same_guy',type=int,default=0,help='default 0')
parser.add_argument('--double_finish',type=int,default=1,help='default 1')
parser.add_argument('--record_regret',type=int,default=1,help='default 1')
ARGS=parser.parse_args()
# Populated from config.yaml inside worker_func().
CONFIG=None
# Visualization is disabled when running inside Docker (no display).
in_docker=os.environ.get('IM_IN_DOCKER',False)
viz=not in_docker
VIZ=None
if viz:
 from pango_visualizer import PangoViz
# Each scenario is run EVAL_ITERS times: half with starting sides swapped.
EVAL_ITERS=2*ARGS.eval_iters
# Set per-opponent in worker_func() when regret is being recorded.
GROUND_TRUTH_IDX=None
# Single-worker executor; used for asynchronous belief updates.
POOL=ThreadPoolExecutor(1)
def extract_cost(npz_in_path):
 """Return the 'cost_weights' array stored in the given .npz archive."""
 archive = np.load(npz_in_path)
 return archive['cost_weights']
# Cost-weight library: one row of NUM_COSTS weights per candidate opponent.
OPP_COST_WEIGHTS=extract_cost(ARGS.result_npz_path)
N_OPP_TOT,NUM_COSTS=OPP_COST_WEIGHTS.shape
# Subset of opponent indices actually used when USE_ONLY_DPP is True.
DPP_IDX=np.array([8,10,22,33,57,94,127,136,150,153])
# Default ego cost index (overridden by --same_guy).
BEST_IDX=33
# Bandit parameters: arms sampled per step and the EXP3 learning rate.
N_ARMS=8
ROLLOUT_LENGTH=250
exp3_const=(N_OPP_TOT+N_ARMS-1)*1./N_ARMS
ETA_EXP3=5*np.sqrt(2*np.log(N_OPP_TOT)/ROLLOUT_LENGTH/exp3_const)
OPP_IDX=np.arange(N_OPP_TOT,dtype=int)
DEVICE=torch.device('cuda:0' if torch.cuda.is_available()else 'cpu')
# Persistent single-agent flow models; weights swapped in via getFlow().
EGO_FLOW_MODEL=MAF(N_BLOCKS,INPUT_SIZE,HIDDEN_SIZE,N_HIDDEN,COND_LABEL_SIZE,ACTIVATION_FCN,INPUT_ORDER,BATCH_NORM).to(DEVICE)
OPP_FLOW_MODEL=MAF(N_BLOCKS,INPUT_SIZE,HIDDEN_SIZE,N_HIDDEN,COND_LABEL_SIZE,ACTIVATION_FCN,INPUT_ORDER,BATCH_NORM).to(DEVICE)
NUM_GUYS=N_OPP_TOT
# Rolling opponent-history window length (same units as prev_s).
MAX_S=6.0
# Robustness ball radii for the ego and opponent robust planners.
RHO_EGO=ARGS.ball_size
RHO_OPP=RHO_EGO
USE_ONLY_DPP=True
if USE_ONLY_DPP:
 NUM_GUYS=len(DPP_IDX)
# Maps a global opponent index to its position within DPP_IDX.
DPP_MAP={}
for i,dpp_idx in enumerate(DPP_IDX):
 DPP_MAP[dpp_idx]=i
@torch.no_grad()
def sampleGroupFlow(group_model,obs,n_row):
 """Draw n_row samples from every member flow of a GroupMAF at once.

 `obs` is a single conditioning vector, broadcast to all group members.
 Returns a numpy float64 array (moved off the GPU when DEVICE is cuda).
 """
 obs=torch.from_numpy(obs.astype(np.float32)).to(DEVICE)
 group_model.eval()
 # One latent batch per group member: shape (group_length, n_row, ...).
 group_u=group_model.base_dist.sample((group_model.group_length,n_row)).to(DEVICE)
 # Repeat the single observation for each group member.
 group_obs=obs[None,None,:].repeat(group_model.group_length,1,1).to(DEVICE)
 samples,_=group_model.inverse(group_u,group_obs)
 if DEVICE.type=='cuda':
  temp=samples.cpu().numpy().astype(np.float64)
 else:
  temp=samples.numpy().astype(np.float64)
 return temp
def getGroupFlow(flow_weights_dir,cost_idx):
 """Build a GroupMAF from per-cost MAF checkpoints.

 Loads 'model_state_cost_<idx>.pt' from `flow_weights_dir` for each index
 in `cost_idx` and wraps the resulting models in a GroupMAF (eval mode).
 """
 model_list=[]
 for idx in cost_idx:
  single_model=MAF(N_BLOCKS,INPUT_SIZE,HIDDEN_SIZE,N_HIDDEN,COND_LABEL_SIZE,ACTIVATION_FCN,INPUT_ORDER,BATCH_NORM).to(DEVICE)
  single_model.load_state_dict(torch.load(flow_weights_dir+'/model_state_cost_'+str(idx)+'.pt',map_location=DEVICE))
  model_list.append(single_model)
 group_model=GroupMAF(DEVICE,model_list,INPUT_SIZE,HIDDEN_SIZE,N_HIDDEN,ACTIVATION_FCN,INPUT_ORDER).to(DEVICE)
 group_model.eval()
 return group_model
def unnormalizeGroupFlow(grid):
 """Batched unnormalizeFlow for (groups, samples, 6) grids (in place).

 Returns the mutated grid.
 """
 # Columns are independent, so the per-column order does not matter.
 grid[:, :, 3:] *= V_SCALE
 grid[:, :, 2] *= THETA_SCALE
 grid[:, :, 1] *= T_SCALE
 grid[:, :, 0] += FLOW_S_SHIFT
 return grid
def normalizeGroupFlow(grid):
 """Batched inverse of unnormalizeGroupFlow (in place).

 Mirrors normalizeFlow() for (groups, samples, 6) grids.  Returns the
 mutated grid so callers can chain it like the sibling helpers
 (normalizeFlow / unnormalizeGroupFlow); the original omitted the return
 and silently yielded None.
 """
 grid[:,:,0]-=FLOW_S_SHIFT
 grid[:,:,1]/=T_SCALE
 grid[:,:,2]/=THETA_SCALE
 grid[:,:,3:]/=V_SCALE
 return grid
def sampleArm(n_arms,belief):
 """Sample n_arms opponent indices (with replacement) from the belief distribution."""
 return np.random.choice(OPP_IDX, n_arms, replace=True, p=belief)
@njit(cache=True)
def cross(vec1,vec2):
 """Return the z-component of the 2-D cross product vec1 x vec2 (sign gives side)."""
 return vec1[0]*vec2[1]-vec1[1]*vec2[0]
@njit(fastmath=False,cache=True)
def updateBeliefHelper(picked_idx_unique,glob_prev_s,glob_prev_opp_pose,waypoints):
 """Convert an opponent pose history into flow-space samples (s, t, theta, 3 vels).

 For each history endpoint at least 2 s-units past the window start, picks
 three evenly spaced knots, records their speeds, and expresses the final
 knot relative to the nearest raceline waypoint (arc offset, signed lateral
 offset, relative heading).  Returns None when the window is too short.
 """
 # First index whose cumulative s is more than 2 beyond the window start.
 end_idx_start=np.searchsorted(glob_prev_s,glob_prev_s[0]+2,side='right')
 hi=range(end_idx_start,len(glob_prev_s))
 if len(hi)==0:
  return None
 flow_samples=np.empty((len(hi),6))
 for end_idx in range(end_idx_start,len(glob_prev_s)):
  # Three interior knots evenly spaced over [0, end_idx].
  knots=np.linspace(0,end_idx,4).astype(np.int32)
  knots=knots[1:]
  vels=np.empty((3,))
  for i in range(knots.shape[0]):
   vels[i]=glob_prev_opp_pose[knots[i],3]
  _,min_dist,min_frac_t,min_i=pure_pursuit_utils.nearest_point_on_trajectory_py2(glob_prev_opp_pose[knots[-1],0:2],waypoints[:,0:2])
  nearest_waypoint=waypoints[min_i]
  # Heading and arc length relative to the nearest waypoint / window start.
  end_theta=glob_prev_opp_pose[knots[-1],2]-nearest_waypoint[3]
  end_s=glob_prev_s[knots[-1]]-glob_prev_s[0]
  vec_to_pt=glob_prev_opp_pose[knots[-1],0:2]-nearest_waypoint[0:2]
  wpt_pt=np.array([np.cos(end_theta),np.sin(end_theta)])
  # Sign of the cross product decides which side of the raceline we are on.
  if cross(wpt_pt,vec_to_pt)<0:
   end_t=-min_dist
  else:
   end_t=min_dist
  flow_samples[end_idx-end_idx_start]=np.concatenate((np.array([end_s,end_t,end_theta]),vels))
 return flow_samples
@njit(fastmath=False,cache=True)
def EXP3(belief_vector,loss,picked_idx_unique,picked_idx_count,record_regret):
 """EXP3 multiplicative-weights update of belief_vector (in place).

 `loss` holds one loss per unique picked arm; each is replicated by its
 pick count, importance-weighted by the arm's current belief, and applied
 as an exponential down-weight with learning rate ETA_EXP3.  Returns the
 mean applied loss when record_regret is truthy, otherwise None (implicit).
 """
 L=np.repeat(loss,picked_idx_count)
 L_idx=np.repeat(picked_idx_unique,picked_idx_count)
 m=L.shape[0]
 for i in range(m):
  idx=L_idx[i]
  # Importance-weighted loss estimate for arm idx.
  update=L[i]/belief_vector[idx]/m
  belief_vector[idx]*=np.exp(-ETA_EXP3*update)
 # Renormalize so the belief remains a probability distribution.
 belief_vector/=np.sum(belief_vector)
 if record_regret:
  return np.mean(L)
def normalizeLogProb(log_prob):
 """Map log-probabilities onto a [0, 1] loss (1 = least likely).

 NaNs are replaced with the worst value (-9) in place; the array is then
 clipped to [-9, -6] and linearly rescaled so -9 -> 1 and -6 -> 0.
 """
 log_prob[np.isnan(log_prob)] = -9.
 clipped = np.clip(log_prob, -9., -6.)
 return (clipped + 6.) / (6. - 9.)
def updateBelief(belief_vector,picked_idx_unique,picked_idx_count,prev_s,prev_opp_pose,prev_opp_obs0,waypoints,opp_group_flow,opp_flow_choice,step_obs):
 """Score the opponent's chosen flow samples under every candidate flow and run EXP3.

 Accumulates per-candidate log-likelihoods across calls in function
 attributes (running_logprob / steps, reset by resetPlayers), converts the
 averaged log-probs of the picked arms to a [0, 1] loss, and updates
 belief_vector in place via EXP3.  Returns the instantaneous regret
 (relative to GROUND_TRUTH_IDX) when --record_regret is set, else None.
 """
 if opp_flow_choice is None:
  return None
 # Broadcast the opponent's conditioning vector and its chosen samples to
 # all NUM_GUYS candidate flows.
 flow_obs=torch.from_numpy(step_obs[EGO_IDX+1][None,None,:].astype(np.float32)).repeat(NUM_GUYS,opp_flow_choice.shape[0],1).to(DEVICE)
 normalized_flow_samples=torch.from_numpy(normalizeFlow(opp_flow_choice)[None,:,:].astype(np.float32)).repeat(NUM_GUYS,1,1).to(DEVICE)
 log_prob=opp_group_flow.log_prob(normalized_flow_samples,flow_obs)
 # Running average of summed log-probs over all steps seen so far.
 if not hasattr(updateBelief,'running_logprob'):
  updateBelief.running_logprob=log_prob.sum(1)
  updateBelief.steps=log_prob.shape[1]
 else:
  updateBelief.running_logprob=updateBelief.running_logprob+log_prob.sum(1)
  updateBelief.steps+=log_prob.shape[1]
 log_prob=updateBelief.running_logprob/updateBelief.steps
 if not USE_ONLY_DPP:
  picked_log_prob=log_prob[picked_idx_unique]
 else:
  # Translate global opponent indices to DPP-subset positions.
  real_idx=np.array([DPP_MAP[u]for u in picked_idx_unique])
  picked_log_prob=log_prob[real_idx]
 picked_log_prob=picked_log_prob.cpu().detach().numpy()
 loss=normalizeLogProb(picked_log_prob)
 if ARGS.record_regret:
  insta_regret=EXP3(belief_vector,loss,picked_idx_unique,picked_idx_count,ARGS.record_regret)
  # Regret is measured against the known ground-truth opponent's loss.
  if not USE_ONLY_DPP:
   insta_regret-=normalizeLogProb(np.array([log_prob[GROUND_TRUTH_IDX].item()]))
  else:
   insta_regret-=normalizeLogProb(np.array([log_prob[DPP_MAP[GROUND_TRUTH_IDX]].item()]))
 else:
  EXP3(belief_vector,loss,picked_idx_unique,picked_idx_count,ARGS.record_regret)
 if ARGS.record_regret:
  return insta_regret
 return None
def resetPlayers(ego_pose,opp_pose,ego_cost,opp_cost,ego_flow_weights,opp_flow_weights,racecar_env,waypoints,worker_directory):
 """Reset the environment and (re)build planners/flows for one episode.

 Loads the per-player flow weights, builds the candidate group flow,
 resets the gym env to the given start poses, and lazily constructs the
 four lattice planners as function attributes so they are created only
 once per process (subsequent calls just swap the cost weights).  Also
 clears updateBelief's running log-prob accumulator.
 """
 map_name=CONFIG['map_name']
 ego_flow=getFlow(EGO_FLOW_MODEL,ego_flow_weights)
 opp_flow=getFlow(OPP_FLOW_MODEL,opp_flow_weights)
 if not USE_ONLY_DPP:
  opp_group_flow=getGroupFlow(worker_directory+'./flow_weights',OPP_IDX)
 else:
  opp_group_flow=getGroupFlow(worker_directory+'./flow_weights',DPP_IDX)
 obs,step_reward,done,info=racecar_env.reset({'x':[ego_pose[0],opp_pose[0]],'y':[ego_pose[1],opp_pose[1]],'theta':[ego_pose[2],opp_pose[2]]})
 # Function attributes act as per-process planner caches.
 if not hasattr(resetPlayers,'ego_planner'):
  resetPlayers.ego_planner=None
  resetPlayers.opp_planner=None
  resetPlayers.multiple_planner=None
  resetPlayers.multiple_planner_opp=None
 if resetPlayers.ego_planner is None:
  resetPlayers.ego_planner=lattice_planner.RobustLatticePlanner(worker_directory+'../maps/'+map_name,waypoints,worker_directory,ego_cost,is_ego=True)
  resetPlayers.opp_planner=lattice_planner.RobustLatticePlanner(worker_directory+'../maps/'+map_name,waypoints,worker_directory,opp_cost,is_ego=False)
  resetPlayers.multiple_planner=lattice_planner.RobustLatticePlanner(worker_directory+'../maps/'+map_name,waypoints,worker_directory,cost_weights=None,is_ego=True)
  resetPlayers.multiple_planner_opp=lattice_planner.RobustLatticePlanner(worker_directory+'../maps/'+map_name,waypoints,worker_directory,cost_weights=None,is_ego=False)
 else:
  # Planners already exist: only the cost weights change between episodes.
  resetPlayers.ego_planner.update_cost(ego_cost)
  resetPlayers.opp_planner.update_cost(opp_cost)
 # Start each episode with a fresh likelihood accumulator in updateBelief.
 if hasattr(updateBelief,'running_logprob'):
  delattr(updateBelief,'running_logprob')
  delattr(updateBelief,'steps')
 return resetPlayers.ego_planner,resetPlayers.opp_planner,resetPlayers.multiple_planner,resetPlayers.multiple_planner_opp,ego_flow,opp_flow,obs,opp_group_flow
def groupGridHelper(group_flow,step_obs,index,picked_unique):
 """Sample the group flow and select the grids/costs of the picked arms.

 Returns (picked_grid, picked_cost_weights): the unnormalized flow samples
 for each picked candidate and their cost-weight rows.
 """
 group_grid=sampleGroupFlow(group_flow,step_obs[index,:],NUM_FLOW_SAMPLES)
 if not USE_ONLY_DPP:
  picked_grid=group_grid[picked_unique,:,:]
 else:
  # Translate global opponent indices to DPP-subset positions.
  real_idx=np.array([DPP_MAP[u]for u in picked_unique])
  picked_grid=group_grid[real_idx,:,:]
 if len(picked_grid.shape)<3:
  picked_grid=picked_grid[None,:,:]
 picked_grid=unnormalizeGroupFlow(picked_grid)
 picked_cost_weights=OPP_COST_WEIGHTS[picked_unique,:]
 # NOTE(review): this condition tests picked_grid (always 3-D after the
 # guard above, so the branch looks unreachable) but pads
 # picked_cost_weights -- possibly meant len(picked_cost_weights.shape)<2;
 # confirm before changing.
 if len(picked_grid.shape)<2:
  picked_cost_weights=picked_cost_weights[None,:]
 return picked_grid,picked_cost_weights
def simulationLoop(ego_pose0,opp_pose0,ego_cost,opp_cost,ego_flow_weights,opp_flow_weights,racecar_env,worker_directory,waypoints):
 """Run one head-to-head episode and return its logs.

 Each planner step: sample both players' flows, keep a rolling opponent
 history window (MAX_S long), plan robustly for both cars against
 bandit-sampled candidate opponents, update the ego's belief via EXP3,
 then step the physics PLANNER_DT/PHYSICS_DT times.

 Returns (score, checkpoint_times, regret_hist, belief_hist, pose_hist)
 with the histories converted to numpy arrays.
 """
 prev_pose=[]
 prev_opp_pose=[]
 prev_opp_obs=[]
 prev_s=[]
 future_list=[]
 # Belief starts uniform over the DPP subset (the uniform-over-all line is
 # immediately overwritten; kept as in the original).
 belief_vector=np.ones((N_OPP_TOT,))*(1.0/N_OPP_TOT)
 belief_vector=np.zeros((N_OPP_TOT,))
 belief_vector[DPP_IDX]=1.
 belief_vector/=np.sum(belief_vector)
 belief_vector_opp=np.ones((N_OPP_TOT,))*(1.0/N_OPP_TOT)
 belief_vector_opp=np.zeros((N_OPP_TOT,))
 belief_vector_opp[DPP_IDX]=1.
 belief_vector_opp/=np.sum(belief_vector_opp)
 ego_planner,opp_planner,multiple_planner,multiple_planner_opp,ego_flow,opp_flow,obs,opp_group_flow=resetPlayers(ego_pose0,opp_pose0,ego_cost,opp_cost,ego_flow_weights,opp_flow_weights,racecar_env,waypoints,worker_directory)
 done=False
 score=0.
 checkpoint_times=[np.inf,np.inf]
 belief_hist=[]
 regret_hist=[]
 pose_hist=[]
 belief_hist.append(np.copy(belief_vector))
 insta_regret=None
 while not done:
  ego_pose,opp_pose,ego_vel,opp_vel=getPlannerObs(obs)
  pose_hist.append([*ego_pose,ego_vel,*opp_pose,opp_vel])
  step_obs=getFlowObs(obs)
  # Per-player flow samples, mapped back to planner units.
  opp_lookup_grid=sampleFlow(opp_flow,step_obs,NUM_FLOW_SAMPLES,EGO_IDX+1)
  ego_lookup_grid=sampleFlow(ego_flow,step_obs,NUM_FLOW_SAMPLES,EGO_IDX)
  opp_lookup_grid=unnormalizeFlow(opp_lookup_grid)
  ego_lookup_grid=unnormalizeFlow(ego_lookup_grid)
  prev_opp_obs.append(step_obs[EGO_IDX+1,:])
  prev_pose.append([*ego_pose,ego_vel])
  prev_opp_pose.append([*opp_pose,opp_vel])
  # Accumulate opponent arc length; skip planning until enough history.
  if len(prev_s)==0:
   prev_s.append(0.)
   continue
  else:
   prev_s.append(prev_s[-1]+np.linalg.norm(np.subtract(prev_opp_pose[-1][0:2],prev_opp_pose[-2][0:2])))
  if len(prev_opp_pose)<4:
   continue
  # Drop history older than MAX_S arc-length units.
  while prev_s[-1]>=prev_s[0]+MAX_S:
   prev_s.pop(0)
   prev_opp_pose.pop(0)
   prev_pose.pop(0)
   prev_opp_obs.pop(0)
  d_opp=np.subtract(prev_opp_pose[-1][0:2],prev_opp_pose[-2][0:2])
  ds_opp=np.linalg.norm(d_opp)
  d_ego=np.subtract(prev_pose[-1][0:2],prev_pose[-2][0:2])
  ds_ego=np.linalg.norm(d_ego)
  # Opponent side: sample candidate egos and plan robustly against them.
  picked_idx_opp=sampleArm(N_ARMS,belief_vector_opp)
  picked_idx_unique_opp,picked_idx_count_opp=np.unique(picked_idx_opp,return_counts=True)
  picked_belief_opp=belief_vector_opp[picked_idx_unique_opp]
  picked_grid_opp,picked_cost_weights_opp=groupGridHelper(opp_group_flow,step_obs,EGO_IDX,picked_idx_unique_opp)
  oppego_picked_traj_list,oppego_picked_param_list=multiple_planner_opp.plan_multiple(ego_pose[0:3],opp_pose[0:3],picked_grid_opp,opp_planner.prev_traj,opp_planner.prev_param,ds_ego,ego_vel,picked_cost_weights_opp,picked_belief_opp)
  oppego_picked_traj_list=np.concatenate(oppego_picked_traj_list,axis=0)
  oppego_picked_param_list=np.vstack(oppego_picked_param_list)
  op_pp_traj,op_safety_flag,opp_flow_choice,op_all_states,op_picked_state,op_xy_grid=opp_planner.plan_robust(opp_pose[0:3],ego_pose[0:3],opp_lookup_grid,oppego_picked_traj_list,oppego_picked_param_list,ds_opp,opp_vel,picked_idx_count_opp,RHO_OPP)
  # Ego side: sample candidate opponents, update belief, plan robustly.
  picked_idx=sampleArm(N_ARMS,belief_vector)
  picked_idx_unique,picked_idx_count=np.unique(picked_idx,return_counts=True)
  if len(future_list)>0:
   if future_list[0].done():
    belief_vector=future_list[0].result()
    future_list.pop(0)
  if ARGS.update_belief:
   insta_regret=updateBelief(belief_vector,picked_idx_unique,picked_idx_count,np.array(prev_s),np.array(prev_opp_pose),prev_opp_obs[0],waypoints,opp_group_flow,opp_flow_choice,step_obs)
  if ARGS.record_regret and insta_regret is not None:
   regret_hist.append(insta_regret)
  belief_hist.append(np.copy(belief_vector))
  picked_belief=belief_vector[picked_idx_unique]
  picked_grid,picked_cost_weights=groupGridHelper(opp_group_flow,step_obs,EGO_IDX+1,picked_idx_unique)
  opp_picked_traj_list,opp_picked_param_list=multiple_planner.plan_multiple(opp_pose[0:3],ego_pose[0:3],picked_grid,ego_planner.prev_traj,ego_planner.prev_param,ds_opp,opp_vel,picked_cost_weights,picked_belief)
  opp_picked_traj_list=np.concatenate(opp_picked_traj_list,axis=0)
  opp_picked_param_list=np.vstack(opp_picked_param_list)
  ego_pp_traj,ego_safety_flag,ego_flow_choice,ego_all_states,ego_picked_state,ego_xy_grid=ego_planner.plan_robust(ego_pose[0:3],opp_pose[0:3],ego_lookup_grid,opp_picked_traj_list,opp_picked_param_list,ds_ego,ego_vel,picked_idx_count,RHO_EGO)
  if viz:
   VIZ.update(obs,ego_lookup_grid,opp_lookup_grid,op_all_states,op_picked_state,ego_all_states,ego_picked_state,ego_planner.CORNER_ON,ego_xy_grid)
  # Execute the planned trajectories at physics rate for one planner step.
  for i in range(int(PLANNER_DT/PHYSICS_DT)):
   if i>0:
    ego_pose,opp_pose,ego_vel,opp_vel=getPlannerObs(obs)
   op_speed,op_steer=opp_planner.compute_action(op_pp_traj,op_safety_flag,opp_pose)
   ego_speed,ego_steer=ego_planner.compute_action(ego_pp_traj,ego_safety_flag,ego_pose)
   action={'ego_idx':EGO_IDX,'speed':[ego_speed,op_speed*lattice_planner.LatticePlanner.OPP_SPEED_SCALE],'steer':[ego_steer,op_steer]}
   obs,step_reward,done,info=racecar_env.step(action)
   score+=step_reward
   if ARGS.double_finish:
    # NOTE(review): the inner loop shadows the substep index i; harmless
    # here because the outer for-loop rebinds i each iteration.
    for i,val in enumerate(info['checkpoint_done']):
     if val and(checkpoint_times[i]==np.inf):
      checkpoint_times[i]=score
   if done:
    break
 return score,checkpoint_times,np.array(regret_hist),np.array(belief_hist),np.array(pose_hist)
def worker_func(worker_directory):
 """Top-level experiment driver for one worker directory.

 Reads config.yaml, loads the raceline and gym environment, then for each
 DPP opponent runs EVAL_ITERS episodes (half with starting sides swapped)
 and saves per-opponent score/checkpoint/regret/belief/pose histories to a
 compressed .npz whose name encodes the CLI arguments.
 """
 global GROUND_TRUTH_IDX
 global VIZ
 global WPTS_DIR,CONFIG
 with open(worker_directory+'config.yaml','r')as yaml_stream:
  try:
   CONFIG=yaml.safe_load(yaml_stream)
   speed_lut_name=CONFIG['speed_lut_name']
   range_lut_name=CONFIG['range_lut_name']
   csv_name=CONFIG['csv_name']
   map_img_ext=CONFIG['map_img_ext']
   map_name=CONFIG['map_name']
   map_prefix=CONFIG['map_prefix']
  except yaml.YAMLError as ex:
   print(ex)
 WPTS_DIR=worker_directory+'../maps/'+csv_name
 waypoints=load_raceline(WPTS_DIR)
 if viz:
  VIZ=PangoViz(worker_directory,worker_directory+'../maps/'+map_prefix+map_img_ext,worker_directory+'../maps/'+map_name,waypoints,False)
 racecar_env=gym.make('f110_gym:f110-v0')
 racecar_env.init_map(worker_directory+'../maps/'+map_name,map_img_ext,False,False)
 racecar_env.update_params(mu,h_cg,l_r,cs_f,cs_r,I_z,mass,worker_directory+'../build/',ARGS.double_finish)
 poses0=generateInitPoses(waypoints)
 flow_weights_dir=worker_directory+'flow_weights/model_state_cost_'
 counter=0
 for opp_idx in DPP_IDX:
  score_histhist=[]
  checkpoint_histhist=[]
  regret_histhist=[]
  belief_histhist=[]
  pose_histhist=[]
  print('ground truth opp idx',opp_idx)
  if ARGS.record_regret:
   GROUND_TRUTH_IDX=opp_idx
  ego_idx=BEST_IDX
  if ARGS.same_guy:
   ego_idx=opp_idx
  ego_cost=OPP_COST_WEIGHTS[ego_idx]
  opp_cost=OPP_COST_WEIGHTS[opp_idx]
  # Flow checkpoints are re-encoded as float64 byte arrays for getFlow().
  if ARGS.ego_frozen_flow:
   ego_flow_weights=np.array(bytearray(pickle.dumps(torch.load(worker_directory+'flows/model_state.pt',map_location=DEVICE)))).astype(np.float64)
  else:
   ego_flow_weights=np.array(bytearray(pickle.dumps(torch.load(flow_weights_dir+str(ego_idx)+'.pt',map_location=DEVICE)))).astype(np.float64)
  opp_flow_weights=np.array(bytearray(pickle.dumps(torch.load(flow_weights_dir+str(opp_idx)+'.pt',map_location=DEVICE)))).astype(np.float64)
  for ii in range(EVAL_ITERS):
   # Deterministic per-iteration seed derived from the ego cost weights.
   seed=int(ego_cost[0]*1e6+ii)
   np.random.seed(seed)
   random.seed(seed)
   torch.manual_seed(seed)
   if DEVICE.type=='cuda':
    torch.cuda.manual_seed(seed)
   start_time=time.time()
   # First half of the iterations: ego on pose 0; second half: sides swapped.
   if ii<EVAL_ITERS/2:
    score,checkpoint_times,regret_hist,belief_hist,pose_hist=simulationLoop(poses0[EGO_IDX],poses0[EGO_IDX+1],ego_cost,opp_cost,ego_flow_weights,opp_flow_weights,racecar_env,worker_directory,waypoints)
    print('checkpoint',checkpoint_times)
   else:
    score,checkpoint_times,regret_hist,belief_hist,pose_hist=simulationLoop(poses0[EGO_IDX+1],poses0[EGO_IDX],ego_cost,opp_cost,ego_flow_weights,opp_flow_weights,racecar_env,worker_directory,waypoints)
    print('checkpoint',checkpoint_times)
   print('Iteration time: '+str(time.time()-start_time),'Score',score)
   score_histhist.append(score)
   checkpoint_histhist.append(checkpoint_times)
   regret_histhist.append(regret_hist)
   belief_histhist.append(belief_hist)
   pose_histhist.append(pose_hist)
  savestring='belief'+str(ARGS.update_belief)+'_'+'ball_size'+str(ARGS.ball_size)+'_'+'frozen'+str(ARGS.ego_frozen_flow)+'_'+'iters'+str(ARGS.eval_iters)+'_'+'sameguy'+str(ARGS.same_guy)+'_'+str(opp_idx)
  np.savez_compressed(savestring+'.npz',score=score_histhist,checkpoint_times=checkpoint_histhist,regret_hist=regret_histhist,belief_hist=belief_histhist,pose_hist=pose_histhist,args=ARGS)
if __name__=="__main__":
worker_func('./')
# Created by pyminifier (https://github.com/liftoff/pyminifier)
| [
"numpy.load",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.random.seed",
"csv.reader",
"numpy.empty",
"numba.njit",
"numpy.ones",
"numpy.clip",
"numpy.isnan",
"numpy.savez_compressed",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"numpy.linalg.norm",
"yaml.safe_load",
"numpy.sin",
... | [((323, 349), 'sys.path.append', 'sys.path.append', (['"""./flows"""'], {}), "('./flows')\n", (338, 349), False, 'import sys\n'), ((403, 418), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (416, 418), False, 'import torch\n'), ((2862, 2887), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2885, 2887), False, 'import argparse\n'), ((3534, 3571), 'os.environ.get', 'os.environ.get', (['"""IM_IN_DOCKER"""', '(False)'], {}), "('IM_IN_DOCKER', False)\n", (3548, 3571), False, 'import os\n'), ((3702, 3723), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['(1)'], {}), '(1)\n', (3720, 3723), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((3905, 3958), 'numpy.array', 'np.array', (['[8, 10, 22, 33, 57, 94, 127, 136, 150, 153]'], {}), '([8, 10, 22, 33, 57, 94, 127, 136, 150, 153])\n', (3913, 3958), True, 'import numpy as np\n'), ((4106, 4137), 'numpy.arange', 'np.arange', (['N_OPP_TOT'], {'dtype': 'int'}), '(N_OPP_TOT, dtype=int)\n', (4115, 4137), True, 'import numpy as np\n'), ((4657, 4672), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4670, 4672), False, 'import torch\n'), ((6033, 6049), 'numba.njit', 'njit', ([], {'cache': '(True)'}), '(cache=True)\n', (6037, 6049), False, 'from numba import njit\n'), ((6114, 6146), 'numba.njit', 'njit', ([], {'fastmath': '(False)', 'cache': '(True)'}), '(fastmath=False, cache=True)\n', (6118, 6146), False, 'from numba import njit\n'), ((7232, 7264), 'numba.njit', 'njit', ([], {'fastmath': '(False)', 'cache': '(True)'}), '(fastmath=False, cache=True)\n', (7236, 7264), False, 'from numba import njit\n'), ((999, 1038), 'numpy.empty', 'np.empty', (['(NUM_AGENTS, COND_LABEL_SIZE)'], {}), '((NUM_AGENTS, COND_LABEL_SIZE))\n', (1007, 1038), True, 'import numpy as np\n'), ((2379, 2435), 'numpy.array', 'np.array', (['[[ego_x, ego_y, ego_t], [opp_x, opp_y, ego_t]]'], {}), '([[ego_x, ego_y, ego_t], [opp_x, opp_y, ego_t]])\n', (2387, 2435), True, 'import numpy as np\n'), 
((5961, 6018), 'numpy.random.choice', 'np.random.choice', (['OPP_IDX', 'n_arms'], {'replace': '(True)', 'p': 'belief'}), '(OPP_IDX, n_arms, replace=True, p=belief)\n', (5977, 6018), True, 'import numpy as np\n'), ((6245, 6307), 'numpy.searchsorted', 'np.searchsorted', (['glob_prev_s', '(glob_prev_s[0] + 2)'], {'side': '"""right"""'}), "(glob_prev_s, glob_prev_s[0] + 2, side='right')\n", (6260, 6307), True, 'import numpy as np\n'), ((7346, 7379), 'numpy.repeat', 'np.repeat', (['loss', 'picked_idx_count'], {}), '(loss, picked_idx_count)\n', (7355, 7379), True, 'import numpy as np\n'), ((7386, 7432), 'numpy.repeat', 'np.repeat', (['picked_idx_unique', 'picked_idx_count'], {}), '(picked_idx_unique, picked_idx_count)\n', (7395, 7432), True, 'import numpy as np\n'), ((7579, 7600), 'numpy.sum', 'np.sum', (['belief_vector'], {}), '(belief_vector)\n', (7585, 7600), True, 'import numpy as np\n'), ((7717, 7746), 'numpy.clip', 'np.clip', (['log_prob', '(-9.0)', '(-6.0)'], {}), '(log_prob, -9.0, -6.0)\n', (7724, 7746), True, 'import numpy as np\n'), ((12070, 12092), 'numpy.zeros', 'np.zeros', (['(N_OPP_TOT,)'], {}), '((N_OPP_TOT,))\n', (12078, 12092), True, 'import numpy as np\n'), ((12136, 12157), 'numpy.sum', 'np.sum', (['belief_vector'], {}), '(belief_vector)\n', (12142, 12157), True, 'import numpy as np\n'), ((12234, 12256), 'numpy.zeros', 'np.zeros', (['(N_OPP_TOT,)'], {}), '((N_OPP_TOT,))\n', (12242, 12256), True, 'import numpy as np\n'), ((12308, 12333), 'numpy.sum', 'np.sum', (['belief_vector_opp'], {}), '(belief_vector_opp)\n', (12314, 12333), True, 'import numpy as np\n'), ((17666, 17694), 'gym.make', 'gym.make', (['"""f110_gym:f110-v0"""'], {}), "('f110_gym:f110-v0')\n", (17674, 17694), False, 'import gym\n'), ((1558, 1573), 'pickle.loads', 'pickle.loads', (['b'], {}), '(b)\n', (1570, 1573), False, 'import pickle\n'), ((2819, 2844), 'numpy.linspace', 'np.linspace', (['(0)', '(1079)', '(100)'], {}), '(0, 1079, 100)\n', (2830, 2844), True, 'import numpy as np\n'), 
((3764, 3784), 'numpy.load', 'np.load', (['npz_in_path'], {}), '(npz_in_path)\n', (3771, 3784), True, 'import numpy as np\n'), ((4169, 4194), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4192, 4194), False, 'import torch\n'), ((4221, 4331), 'maf.MAF', 'MAF', (['N_BLOCKS', 'INPUT_SIZE', 'HIDDEN_SIZE', 'N_HIDDEN', 'COND_LABEL_SIZE', 'ACTIVATION_FCN', 'INPUT_ORDER', 'BATCH_NORM'], {}), '(N_BLOCKS, INPUT_SIZE, HIDDEN_SIZE, N_HIDDEN, COND_LABEL_SIZE,\n ACTIVATION_FCN, INPUT_ORDER, BATCH_NORM)\n', (4224, 4331), False, 'from maf import MAF\n'), ((4347, 4457), 'maf.MAF', 'MAF', (['N_BLOCKS', 'INPUT_SIZE', 'HIDDEN_SIZE', 'N_HIDDEN', 'COND_LABEL_SIZE', 'ACTIVATION_FCN', 'INPUT_ORDER', 'BATCH_NORM'], {}), '(N_BLOCKS, INPUT_SIZE, HIDDEN_SIZE, N_HIDDEN, COND_LABEL_SIZE,\n ACTIVATION_FCN, INPUT_ORDER, BATCH_NORM)\n', (4350, 4457), False, 'from maf import MAF\n'), ((6542, 6556), 'numpy.empty', 'np.empty', (['(3,)'], {}), '((3,))\n', (6550, 6556), True, 'import numpy as np\n'), ((6663, 6773), 'mpc.pure_pursuit_utils.nearest_point_on_trajectory_py2', 'pure_pursuit_utils.nearest_point_on_trajectory_py2', (['glob_prev_opp_pose[knots[-1], 0:2]', 'waypoints[:, 0:2]'], {}), '(glob_prev_opp_pose[knots\n [-1], 0:2], waypoints[:, 0:2])\n', (6713, 6773), False, 'from mpc import lattice_planner, pure_pursuit_utils\n'), ((7538, 7564), 'numpy.exp', 'np.exp', (['(-ETA_EXP3 * update)'], {}), '(-ETA_EXP3 * update)\n', (7544, 7564), True, 'import numpy as np\n'), ((7629, 7639), 'numpy.mean', 'np.mean', (['L'], {}), '(L)\n', (7636, 7639), True, 'import numpy as np\n'), ((7683, 7701), 'numpy.isnan', 'np.isnan', (['log_prob'], {}), '(log_prob)\n', (7691, 7701), True, 'import numpy as np\n'), ((8731, 8780), 'numpy.array', 'np.array', (['[DPP_MAP[u] for u in picked_idx_unique]'], {}), '([DPP_MAP[u] for u in picked_idx_unique])\n', (8739, 8780), True, 'import numpy as np\n'), ((10209, 10343), 'mpc.lattice_planner.RobustLatticePlanner', 'lattice_planner.RobustLatticePlanner', 
(["(worker_directory + '../maps/' + map_name)", 'waypoints', 'worker_directory', 'ego_cost'], {'is_ego': '(True)'}), "(worker_directory + '../maps/' +\n map_name, waypoints, worker_directory, ego_cost, is_ego=True)\n", (10245, 10343), False, 'from mpc import lattice_planner, pure_pursuit_utils\n'), ((10359, 10494), 'mpc.lattice_planner.RobustLatticePlanner', 'lattice_planner.RobustLatticePlanner', (["(worker_directory + '../maps/' + map_name)", 'waypoints', 'worker_directory', 'opp_cost'], {'is_ego': '(False)'}), "(worker_directory + '../maps/' +\n map_name, waypoints, worker_directory, opp_cost, is_ego=False)\n", (10395, 10494), False, 'from mpc import lattice_planner, pure_pursuit_utils\n'), ((10515, 10658), 'mpc.lattice_planner.RobustLatticePlanner', 'lattice_planner.RobustLatticePlanner', (["(worker_directory + '../maps/' + map_name)", 'waypoints', 'worker_directory'], {'cost_weights': 'None', 'is_ego': '(True)'}), "(worker_directory + '../maps/' +\n map_name, waypoints, worker_directory, cost_weights=None, is_ego=True)\n", (10551, 10658), False, 'from mpc import lattice_planner, pure_pursuit_utils\n'), ((10683, 10827), 'mpc.lattice_planner.RobustLatticePlanner', 'lattice_planner.RobustLatticePlanner', (["(worker_directory + '../maps/' + map_name)", 'waypoints', 'worker_directory'], {'cost_weights': 'None', 'is_ego': '(False)'}), "(worker_directory + '../maps/' +\n map_name, waypoints, worker_directory, cost_weights=None, is_ego=False)\n", (10719, 10827), False, 'from mpc import lattice_planner, pure_pursuit_utils\n'), ((11421, 11466), 'numpy.array', 'np.array', (['[DPP_MAP[u] for u in picked_unique]'], {}), '([DPP_MAP[u] for u in picked_unique])\n', (11429, 11466), True, 'import numpy as np\n'), ((12017, 12038), 'numpy.ones', 'np.ones', (['(N_OPP_TOT,)'], {}), '((N_OPP_TOT,))\n', (12024, 12038), True, 'import numpy as np\n'), ((12177, 12198), 'numpy.ones', 'np.ones', (['(N_OPP_TOT,)'], {}), '((N_OPP_TOT,))\n', (12184, 12198), True, 'import numpy as np\n'), 
((12681, 12703), 'numpy.copy', 'np.copy', (['belief_vector'], {}), '(belief_vector)\n', (12688, 12703), True, 'import numpy as np\n'), ((13596, 13655), 'numpy.subtract', 'np.subtract', (['prev_opp_pose[-1][0:2]', 'prev_opp_pose[-2][0:2]'], {}), '(prev_opp_pose[-1][0:2], prev_opp_pose[-2][0:2])\n', (13607, 13655), True, 'import numpy as np\n'), ((13664, 13685), 'numpy.linalg.norm', 'np.linalg.norm', (['d_opp'], {}), '(d_opp)\n', (13678, 13685), True, 'import numpy as np\n'), ((13694, 13745), 'numpy.subtract', 'np.subtract', (['prev_pose[-1][0:2]', 'prev_pose[-2][0:2]'], {}), '(prev_pose[-1][0:2], prev_pose[-2][0:2])\n', (13705, 13745), True, 'import numpy as np\n'), ((13754, 13775), 'numpy.linalg.norm', 'np.linalg.norm', (['d_ego'], {}), '(d_ego)\n', (13768, 13775), True, 'import numpy as np\n'), ((13874, 13919), 'numpy.unique', 'np.unique', (['picked_idx_opp'], {'return_counts': '(True)'}), '(picked_idx_opp, return_counts=True)\n', (13883, 13919), True, 'import numpy as np\n'), ((14352, 14399), 'numpy.concatenate', 'np.concatenate', (['oppego_picked_traj_list'], {'axis': '(0)'}), '(oppego_picked_traj_list, axis=0)\n', (14366, 14399), True, 'import numpy as np\n'), ((14426, 14461), 'numpy.vstack', 'np.vstack', (['oppego_picked_param_list'], {}), '(oppego_picked_param_list)\n', (14435, 14461), True, 'import numpy as np\n'), ((14791, 14832), 'numpy.unique', 'np.unique', (['picked_idx'], {'return_counts': '(True)'}), '(picked_idx, return_counts=True)\n', (14800, 14832), True, 'import numpy as np\n'), ((15683, 15727), 'numpy.concatenate', 'np.concatenate', (['opp_picked_traj_list'], {'axis': '(0)'}), '(opp_picked_traj_list, axis=0)\n', (15697, 15727), True, 'import numpy as np\n'), ((15751, 15783), 'numpy.vstack', 'np.vstack', (['opp_picked_param_list'], {}), '(opp_picked_param_list)\n', (15760, 15783), True, 'import numpy as np\n'), ((16893, 16914), 'numpy.array', 'np.array', (['regret_hist'], {}), '(regret_hist)\n', (16901, 16914), True, 'import numpy as np\n'), 
((16915, 16936), 'numpy.array', 'np.array', (['belief_hist'], {}), '(belief_hist)\n', (16923, 16936), True, 'import numpy as np\n'), ((16937, 16956), 'numpy.array', 'np.array', (['pose_hist'], {}), '(pose_hist)\n', (16945, 16956), True, 'import numpy as np\n'), ((17522, 17670), 'pango_visualizer.PangoViz', 'PangoViz', (['worker_directory', "(worker_directory + '../maps/' + map_prefix + map_img_ext)", "(worker_directory + '../maps/' + map_name)", 'waypoints', '(False)'], {}), "(worker_directory, worker_directory + '../maps/' + map_prefix +\n map_img_ext, worker_directory + '../maps/' + map_name, waypoints, False)\n", (17530, 17670), False, 'from pango_visualizer import PangoViz\n'), ((20031, 20233), 'numpy.savez_compressed', 'np.savez_compressed', (["(savestring + '.npz')"], {'score': 'score_histhist', 'checkpoint_times': 'checkpoint_histhist', 'regret_hist': 'regret_histhist', 'belief_hist': 'belief_histhist', 'pose_hist': 'pose_histhist', 'args': 'ARGS'}), "(savestring + '.npz', score=score_histhist,\n checkpoint_times=checkpoint_histhist, regret_hist=regret_histhist,\n belief_hist=belief_histhist, pose_hist=pose_histhist, args=ARGS)\n", (20050, 20233), True, 'import numpy as np\n'), ((1086, 1124), 'numpy.take', 'np.take', (["obs['scans'][i]", 'scan_sub_idx'], {}), "(obs['scans'][i], scan_sub_idx)\n", (1093, 1124), True, 'import numpy as np\n'), ((5511, 5607), 'group_maf.GroupMAF', 'GroupMAF', (['DEVICE', 'model_list', 'INPUT_SIZE', 'HIDDEN_SIZE', 'N_HIDDEN', 'ACTIVATION_FCN', 'INPUT_ORDER'], {}), '(DEVICE, model_list, INPUT_SIZE, HIDDEN_SIZE, N_HIDDEN,\n ACTIVATION_FCN, INPUT_ORDER)\n', (5519, 5607), False, 'from group_maf import GroupMAF\n'), ((15273, 15295), 'numpy.copy', 'np.copy', (['belief_vector'], {}), '(belief_vector)\n', (15280, 15295), True, 'import numpy as np\n'), ((17133, 17160), 'yaml.safe_load', 'yaml.safe_load', (['yaml_stream'], {}), '(yaml_stream)\n', (17147, 17160), False, 'import yaml\n'), ((18888, 18908), 'numpy.random.seed', 
'np.random.seed', (['seed'], {}), '(seed)\n', (18902, 18908), True, 'import numpy as np\n'), ((18912, 18929), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (18923, 18929), False, 'import random\n'), ((18933, 18956), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (18950, 18956), False, 'import torch\n'), ((19031, 19042), 'time.time', 'time.time', ([], {}), '()\n', (19040, 19042), False, 'import time\n'), ((1682, 1695), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (1692, 1695), False, 'import csv\n'), ((2015, 2028), 'numpy.cos', 'np.cos', (['ego_t'], {}), '(ego_t)\n', (2021, 2028), True, 'import numpy as np\n'), ((2057, 2070), 'numpy.sin', 'np.sin', (['ego_t'], {}), '(ego_t)\n', (2063, 2070), True, 'import numpy as np\n'), ((2152, 2177), 'numpy.cos', 'np.cos', (['(ego_t + np.pi / 2)'], {}), '(ego_t + np.pi / 2)\n', (2158, 2177), True, 'import numpy as np\n'), ((2199, 2224), 'numpy.sin', 'np.sin', (['(ego_t + np.pi / 2)'], {}), '(ego_t + np.pi / 2)\n', (2205, 2224), True, 'import numpy as np\n'), ((2282, 2295), 'numpy.cos', 'np.cos', (['ego_t'], {}), '(ego_t)\n', (2288, 2295), True, 'import numpy as np\n'), ((2357, 2370), 'numpy.sin', 'np.sin', (['ego_t'], {}), '(ego_t)\n', (2363, 2370), True, 'import numpy as np\n'), ((5236, 5346), 'maf.MAF', 'MAF', (['N_BLOCKS', 'INPUT_SIZE', 'HIDDEN_SIZE', 'N_HIDDEN', 'COND_LABEL_SIZE', 'ACTIVATION_FCN', 'INPUT_ORDER', 'BATCH_NORM'], {}), '(N_BLOCKS, INPUT_SIZE, HIDDEN_SIZE, N_HIDDEN, COND_LABEL_SIZE,\n ACTIVATION_FCN, INPUT_ORDER, BATCH_NORM)\n', (5239, 5346), False, 'from maf import MAF\n'), ((6475, 6501), 'numpy.linspace', 'np.linspace', (['(0)', 'end_idx', '(4)'], {}), '(0, end_idx, 4)\n', (6486, 6501), True, 'import numpy as np\n'), ((6999, 7016), 'numpy.cos', 'np.cos', (['end_theta'], {}), '(end_theta)\n', (7005, 7016), True, 'import numpy as np\n'), ((7017, 7034), 'numpy.sin', 'np.sin', (['end_theta'], {}), '(end_theta)\n', (7023, 7034), True, 'import numpy as np\n'), ((7168, 7203), 
'numpy.array', 'np.array', (['[end_s, end_t, end_theta]'], {}), '([end_s, end_t, end_theta])\n', (7176, 7203), True, 'import numpy as np\n'), ((15054, 15070), 'numpy.array', 'np.array', (['prev_s'], {}), '(prev_s)\n', (15062, 15070), True, 'import numpy as np\n'), ((15071, 15094), 'numpy.array', 'np.array', (['prev_opp_pose'], {}), '(prev_opp_pose)\n', (15079, 15094), True, 'import numpy as np\n'), ((18988, 19016), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (19010, 19016), False, 'import torch\n'), ((2246, 2271), 'numpy.cos', 'np.cos', (['(ego_t - np.pi / 2)'], {}), '(ego_t - np.pi / 2)\n', (2252, 2271), True, 'import numpy as np\n'), ((2321, 2346), 'numpy.sin', 'np.sin', (['(ego_t - np.pi / 2)'], {}), '(ego_t - np.pi / 2)\n', (2327, 2346), True, 'import numpy as np\n'), ((4053, 4070), 'numpy.log', 'np.log', (['N_OPP_TOT'], {}), '(N_OPP_TOT)\n', (4059, 4070), True, 'import numpy as np\n'), ((13365, 13424), 'numpy.subtract', 'np.subtract', (['prev_opp_pose[-1][0:2]', 'prev_opp_pose[-2][0:2]'], {}), '(prev_opp_pose[-1][0:2], prev_opp_pose[-2][0:2])\n', (13376, 13424), True, 'import numpy as np\n'), ((19593, 19604), 'time.time', 'time.time', ([], {}), '()\n', (19602, 19604), False, 'import time\n'), ((18437, 18511), 'torch.load', 'torch.load', (["(worker_directory + 'flows/model_state.pt')"], {'map_location': 'DEVICE'}), "(worker_directory + 'flows/model_state.pt', map_location=DEVICE)\n", (18447, 18511), False, 'import torch\n')] |
import numpy as np
class ReservoirSampler(object):
    """Maintain a uniform random sample of k items from a stream in O(k) space.

    See https://en.wikipedia.org/wiki/Reservoir_sampling.
    """

    def __init__(self, k):
        # Current reservoir (at most k items).
        self.samples = []
        # Total number of stream items observed so far.
        self.num_seen = 0
        self.k = k

    def add(self, item):
        """Observe one stream item; each item seen ends up in the sample with probability k/num_seen."""
        self.num_seen += 1
        if self.num_seen <= self.k:
            # Fill phase: keep the first k items unconditionally.
            self.samples.append(item)
        elif np.random.rand() <= self.k / self.num_seen:
            # Replacement phase: evict a uniformly chosen slot.
            # randint avoids materializing range(k) as choice() did.
            self.samples[np.random.randint(self.k)] = item

    def get_sample(self):
        """Return a copy of the current reservoir (callers may mutate it freely)."""
        return self.samples[:]
| [
"numpy.random.rand"
] | [((436, 453), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (450, 453), True, 'import numpy as np\n')] |
import numpy as np
import _pickle as cPickle
import copy
from pommerman import forward_model
from pommerman import constants
from pommerman import characters
from pommerman import utility
class EnvSimulator:
    """Forward-simulates a 2-player Pommerman game from raw observations.

    All methods are static; the mutable simulation state lives in a GameData
    instance that is threaded through the calls.
    """

    @staticmethod
    def get_initial_game_data(obs, my_id, max_steps=1000):
        """Build a GameData snapshot from the first observation of an episode.

        Raises ValueError if the initial board already contains bombs or flames.
        """
        game_data = GameData()
        game_data.board_size = len(obs['board'])
        game_data.step_count = obs['step_count'] - 1
        game_data.max_steps = max_steps
        game_data.game_type = obs['game_type']
        game_data.simulation_bomb_life = None

        # board
        game_data.board = EnvSimulator.get_board(game_data.board_size, obs['board'])
        # items (revealed lazily as wood is destroyed)
        game_data.items = {}
        # agents (board ids are agent_id + 10)
        game_data.agents = []
        for agent_id in [constants.Item.Agent0.value - 10, constants.Item.Agent1.value - 10]:
            board_id = agent_id + 10
            agent = characters.Bomber(agent_id, game_data.game_type)
            agent.set_start_position(EnvSimulator.get_position(game_data.board, board_id, True))
            if (agent_id == my_id):
                # Our own abilities are fully observable.
                agent.reset(int(obs['ammo']), board_id in obs['alive'], int(obs['blast_strength']),
                            bool(obs['can_kick']))
            else:
                # Opponent abilities start at defaults and are inferred later.
                agent.reset(agent.ammo, board_id in obs['alive'], agent.blast_strength, agent.can_kick)
            game_data.agents.append(agent)

        # bombs
        game_data.bombs = []
        bomb_array = EnvSimulator.get_position(game_data.board, constants.Item.Bomb.value, False)
        if len(bomb_array) > 0:
            raise ValueError('Invalid: no bombs allowed in initial state')

        # flames
        game_data.flames = []
        flame_array = EnvSimulator.get_position(game_data.board, constants.Item.Flames.value, False)
        if len(flame_array) > 0:
            raise ValueError('Invalid: no flames allowed in initial state')

        # done
        game_data.done = forward_model.ForwardModel.get_done(game_data.agents, game_data.step_count,
                                                            game_data.max_steps, game_data.game_type, None)
        return game_data

    @staticmethod
    def update(game_data, obs, my_id):
        """Advance game_data by one step so it matches a fresh observation.

        Infers both agents' actions from the board diff, replays them through
        the forward model, and reconciles remaining differences (picked-up
        items, newly revealed opponent abilities).

        Returns (game_data, actions, reset) where `reset` signals that hidden
        state had to be corrected and downstream caches should be invalidated.
        """
        if (game_data.board_size != len(obs['board'])):
            raise ValueError('Invalid update: boardsize different!')
        if (game_data.step_count + 1 != obs['step_count']):
            raise ValueError('Invalid update: missed step count!')
        game_data.step_count = obs['step_count']

        new_board = EnvSimulator.get_board(game_data.board_size, obs['board'])
        new_bomb_life = EnvSimulator.get_board(game_data.board_size, obs['bomb_life'], 0)
        new_bomb_strength = EnvSimulator.get_board(game_data.board_size, obs['bomb_blast_strength'], 0)
        reset = False

        # Infer each agent's last action from its position change.
        actions = {}
        for a in game_data.agents:
            old_pos = EnvSimulator.get_position(game_data.board, a.agent_id + 10, True)
            new_pos = EnvSimulator.get_position(new_board, a.agent_id + 10, True)
            if not a.is_alive:
                raise ValueError('update error: agent life!')
            if (old_pos != new_pos):
                actions[a.agent_id] = utility.get_direction(old_pos, new_pos).value
                # Agent walked onto a resting bomb => it must be able to kick.
                if not a.can_kick and game_data.board[new_pos] == constants.Item.Bomb.value:
                    for b in game_data.bombs:
                        if b.position == new_pos and b.moving_direction == None:
                            a.can_kick = True
                            reset = True
            elif new_bomb_life[new_pos] == constants.DEFAULT_BOMB_LIFE:
                # A fresh bomb appeared under the agent => it laid a bomb.
                actions[a.agent_id] = constants.Action.Bomb.value
                if a.ammo == 0:
                    # Opponent had more ammo than we believed.
                    a.ammo += 1
                    reset = True
                if a.blast_strength != new_bomb_strength[new_pos]:
                    a.blast_strength = new_bomb_strength[new_pos]
                    reset = True
            else:
                actions[a.agent_id] = constants.Action.Stop.value

        save_game_data = copy.deepcopy(game_data)
        EnvSimulator.act(game_data, actions)

        if game_data.agents[0].is_alive != (10 in obs['alive']):
            raise ValueError(f'update error: agent life!\n\n{game_data.board}\n\n{new_board}')
        if game_data.agents[1].is_alive != (11 in obs['alive']):
            raise ValueError(f'update error: agent life!\n\n{game_data.board}\n\n{new_board}')
        if (len(game_data.bombs) != len(new_bomb_life[new_bomb_life > 0])):
            raise ValueError(f'update error: bomb count!\n\n{game_data.board}\n\n{new_board}')

        # compare boards
        equal, equal_noitems = EnvSimulator.boards_equal(game_data.board, new_board, True)
        if not equal:
            if equal_noitems:
                reset = True  # EQUAL WITHOUT ITEMS => SOMEWHERE NEW ITEMS AVAILABLE -> RESET
            else:
                print(f'board unequal: {game_data.board}\n\n{new_board}\n\n{actions}')

                def find_actions(save_game_data, actions):
                    # Brute-force ambiguous Stop actions until the replayed
                    # board matches the observed one (ignoring item tiles).
                    actions_1 = [actions[0]] if actions[0] != 0 else range(1, 6)
                    actions_2 = [actions[1]] if actions[1] != 0 else range(1, 6)
                    for a1 in actions_1:
                        for a2 in actions_2:
                            game_data = copy.deepcopy(save_game_data)
                            acts = {0: a1, 1: a2}
                            EnvSimulator.act(game_data, acts)
                            eq, eq_noitems = EnvSimulator.boards_equal(game_data.board, new_board, True)
                            if eq_noitems:
                                return game_data, acts, eq
                    return None, None, False

                game_data, actions, eq = find_actions(save_game_data, actions)
                print(f'found game_data: {game_data}\n\n{actions}')
                if not game_data:
                    game_data, actions, eq = find_actions(save_game_data, actions)
                    game_data, actions, eq = find_actions(save_game_data, actions)
                    raise ValueError(f'should not happen anymore')
                if not eq:
                    reset = True  # EQUAL WITHOUT ITEMS => SOMEWHERE NEW ITEMS AVAILABLE -> RESET

        # Our own abilities are always fully observable; resync them.
        game_data.agents[my_id].ammo = int(obs['ammo'])
        game_data.agents[my_id].blast_strength = int(obs['blast_strength'])
        game_data.agents[my_id].can_kick = bool(obs['can_kick'])

        # update board because of items
        game_data.board = new_board
        return game_data, actions, reset

    @staticmethod
    def act(game_data, actions):
        """Apply one action per agent via the Pommerman forward model (mutates game_data)."""
        if game_data.simulation_bomb_life:
            # Optionally cap bomb fuses to shorten simulated rollouts.
            for b in game_data.bombs:
                if b.life > game_data.simulation_bomb_life: b.life = game_data.simulation_bomb_life

        game_data.board, \
        game_data.agents, \
        game_data.bombs, \
        game_data.items, \
        game_data.flames = forward_model.ForwardModel.step(actions,
                                                        game_data.board,
                                                        game_data.agents,
                                                        game_data.bombs,
                                                        game_data.items,
                                                        game_data.flames)
        if game_data.simulation_bomb_life:
            for b in game_data.bombs:
                if b.life > 2: b.life = 2

        # done
        game_data.done = forward_model.ForwardModel.get_done(game_data.agents, game_data.step_count,
                                                            game_data.max_steps, game_data.game_type, None)

    @staticmethod
    def get_done(game_data):
        """Return the terminal flag computed by the last act()/initialization."""
        return game_data.done

    @staticmethod
    def get_alive(game_data):
        """Return a dict mapping agent_id -> is_alive."""
        alive = {}
        for a in game_data.agents:
            alive[a.agent_id] = a.is_alive
        return alive

    @staticmethod
    def get_board(board_size, board_array, init_value=constants.Item.Passage.value):
        """Copy a nested-list observation board into a square uint8 ndarray."""
        board = np.ones((board_size, board_size)).astype(np.uint8)
        board *= init_value
        for x in range(board_size):
            for y in range(board_size):
                board[x, y] = board_array[x][y]
        return board

    @staticmethod
    def get_position(board, item, is_single_pos):
        """Find positions of `item` on `board`.

        Returns a single (row, col) tuple when is_single_pos is True (raising
        ValueError unless exactly one match exists), otherwise a list of tuples.
        """
        pos = np.where(board == item)
        pos = list(zip(pos[0], pos[1]))
        if is_single_pos:
            if len(pos) != 1:
                raise ValueError("Invalid pos count!", board, item)
            return pos[0]
        else:
            return pos

    @staticmethod
    def get_valid_actions(board, flames, bombs, agent, actions):
        """Filter `actions` down to those the agent can legally take this step."""
        valid_actions = []
        invalid_values = None
        invalid_positions = None
        row, col = agent.position
        for action in actions:
            if action is constants.Action.Bomb.value:
                if agent.ammo > 0: valid_actions.append(action)
            else:
                if invalid_values is None:
                    invalid_values = [item.value for item in [constants.Item.Rigid, constants.Item.Wood]]
                    if not agent.can_kick: invalid_values.append(constants.Item.Bomb.value)
                if invalid_positions is None:
                    # Cells that will be covered by flames this step are also invalid.
                    invalid_positions = EnvSimulator.get_invalid_positions(board, flames, bombs)

                if EnvSimulator.is_valid_direction(board, row, col, action, invalid_values, invalid_positions):
                    valid_actions.append(action)
        return valid_actions

    @staticmethod
    def boards_equal(board1, board2, ignore_items):
        """Compare two boards.

        With ignore_items=True returns (exact_equal, equal_ignoring_item_tiles);
        otherwise returns a single equality flag.
        """
        comparison = (board1 == board2).all()

        if ignore_items:
            board1 = copy.deepcopy(board1)
            board2 = copy.deepcopy(board2)
            board1[board1 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
            board1[board1 == constants.Item.IncrRange.value] = constants.Item.Passage.value
            board1[board1 == constants.Item.Kick.value] = constants.Item.Passage.value
            board2[board2 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
            board2[board2 == constants.Item.IncrRange.value] = constants.Item.Passage.value
            board2[board2 == constants.Item.Kick.value] = constants.Item.Passage.value
            comparison_ignore = (board1 == board2).all()
            return comparison, comparison_ignore

        return comparison.all()

    @staticmethod
    def boards_equal_speed(board1, board2, ignore_items):
        """Allocation-light board comparison.

        NOTE(review): with ignore_items=True this returns True when the boards
        DIFFER beyond item tiles, while ignore_items=False returns True when
        they are EQUAL — the two modes have inverted semantics; verify callers.
        """
        comparison = (board1 != board2)
        if ignore_items:
            diff_items = False
            b1_diff = board1[comparison]
            b2_diff = board2[comparison]
            b1_no_items = (b1_diff != constants.Item.ExtraBomb.value) & \
                          (b1_diff != constants.Item.IncrRange.value) & \
                          (b1_diff != constants.Item.Kick.value) & \
                          (b1_diff != constants.Item.Passage.value)
            diff_items = b1_no_items.any()
            if not diff_items:
                b2_no_items = (b2_diff != constants.Item.ExtraBomb.value) & \
                              (b2_diff != constants.Item.IncrRange.value) & \
                              (b2_diff != constants.Item.Kick.value) &\
                              (b2_diff != constants.Item.Passage.value)
                diff_items = b2_no_items.any()
            return diff_items
        return not comparison.any()

    @staticmethod
    def get_boards_differences(board1, board2):
        """Classify the differences between two consecutive boards.

        Returns (a1bomb, a2bomb, kick, flame) flags describing which events
        explain the diff; raises ValueError for an unexplainable difference.
        """
        board1 = copy.deepcopy(board1)
        board2 = copy.deepcopy(board2)
        board1[board1 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
        board1[board1 == constants.Item.IncrRange.value] = constants.Item.Passage.value
        board1[board1 == constants.Item.Kick.value] = constants.Item.Passage.value
        board2[board2 == constants.Item.ExtraBomb.value] = constants.Item.Passage.value
        board2[board2 == constants.Item.IncrRange.value] = constants.Item.Passage.value
        board2[board2 == constants.Item.Kick.value] = constants.Item.Passage.value

        a1bomb = a2bomb = kick = flame = False
        comparison = (board1 == board2)
        # BUG FIX: the original used `np.where(comparison is False)`; `is`
        # compares object identity, which is never True for an ndarray, so no
        # difference was ever inspected. Select the mismatching cells instead.
        diffs = np.where(~comparison)
        if len(diffs) >= 2:
            diffs = list(zip(diffs[0], diffs[1]))
            for diff in diffs:
                prev_item = board1[diff]
                new_item = board2[diff]
                if prev_item == constants.Item.Agent1.value and new_item == constants.Item.Bomb.value:
                    a1bomb = True
                elif prev_item == constants.Item.Agent2.value and new_item == constants.Item.Bomb.value:
                    a2bomb = True
                elif prev_item == constants.Item.Passage.value and new_item == constants.Item.Bomb.value:
                    kick = True
                elif new_item == constants.Item.Flames.value:
                    flame = True
                else:
                    raise ValueError('Invalid difference between maps.')
        return a1bomb, a2bomb, kick, flame

    @staticmethod
    def get_invalid_positions(board, flames, bombs):
        """Return positions that are (or will become) lethal: live flames plus
        the blast area of every bomb that explodes this step, propagating
        chain reactions until a fixed point."""
        invalid_positions = []
        for flame in flames:
            if flame.life > 0:
                invalid_positions.append(flame.position)

        exploded = True
        exp_bombs = []
        while exploded:
            exploded = False
            for bomb in bombs:
                # BUG FIX: `bomb.life is 1` compared identity, not value.
                if bomb not in exp_bombs and (bomb.life == 1 or bomb.position in invalid_positions):
                    EnvSimulator._get_bomb_fire_positions(board, bomb, invalid_positions)
                    exp_bombs.append(bomb)
                    exploded = True

        return invalid_positions

    @staticmethod
    def is_valid_direction(board, row, col, direction, invalid_values, invalid_positions):
        '''Determines if a move is in a valid direction'''
        if constants.Action(direction) == constants.Action.Up:
            return row - 1 >= 0 and board[row - 1][col] not in invalid_values and (
                row - 1, col) not in invalid_positions
        elif constants.Action(direction) == constants.Action.Down:
            return row + 1 < len(board) and board[row + 1][col] not in invalid_values and (
                row + 1, col) not in invalid_positions
        elif constants.Action(direction) == constants.Action.Left:
            return col - 1 >= 0 and board[row][col - 1] not in invalid_values and (
                row, col - 1) not in invalid_positions
        elif constants.Action(direction) == constants.Action.Right:
            return col + 1 < len(board[0]) and board[row][col + 1] not in invalid_values and (
                row, col + 1) not in invalid_positions
        elif constants.Action(direction) == constants.Action.Stop:
            return board[row][col] not in invalid_values and (
                row, col) not in invalid_positions
        raise constants.InvalidAction("We did not receive a valid direction: ", direction)

    @staticmethod
    def _get_bomb_fire_positions(board, bomb, fire_pos):
        """Append every cell covered by this bomb's blast cross to fire_pos."""
        fire_pos.append(bomb.position)
        EnvSimulator._get_fire_positions_in_direction(board, bomb.position[0], bomb.position[1],
                                                      bomb.blast_strength - 1, 0, 1, fire_pos)  # right
        EnvSimulator._get_fire_positions_in_direction(board, bomb.position[0], bomb.position[1],
                                                      bomb.blast_strength - 1, 0, -1, fire_pos)  # left
        EnvSimulator._get_fire_positions_in_direction(board, bomb.position[0], bomb.position[1],
                                                      bomb.blast_strength - 1, -1, 0, fire_pos)  # up
        EnvSimulator._get_fire_positions_in_direction(board, bomb.position[0], bomb.position[1],
                                                      bomb.blast_strength - 1, 1, 0, fire_pos)  # down

    @staticmethod
    def _get_fire_positions_in_direction(board, x, y, strength, x_dir, y_dir, fire_pos):
        """Recursively extend the blast from (x, y) along (x_dir, y_dir) until
        strength runs out or a rigid/wood tile blocks it."""
        if strength <= 0 or not utility.position_on_board(board, (x, y)):
            return
        next_x = x + x_dir
        next_y = y + y_dir
        if not utility.position_on_board(board, (next_x, next_y)):
            return
        if utility.position_in_items(board, (next_x, next_y), [constants.Item.Rigid, constants.Item.Wood]):
            return

        fire_pos.append((next_x, next_y))
        EnvSimulator._get_fire_positions_in_direction(board, next_x, next_y, strength - 1, x_dir, y_dir, fire_pos)

    @staticmethod
    def get_bomb_items(board, bomb_pos, bomb_strength):
        """Return the distinct board items a bomb at bomb_pos would hit."""
        items = []
        EnvSimulator._get_items_in_direction(board, bomb_pos, bomb_strength - 1, 0, 1, items)
        EnvSimulator._get_items_in_direction(board, bomb_pos, bomb_strength - 1, 0, -1, items)
        EnvSimulator._get_items_in_direction(board, bomb_pos, bomb_strength - 1, -1, 0, items)
        EnvSimulator._get_items_in_direction(board, bomb_pos, bomb_strength - 1, 1, 0, items)
        return items

    @staticmethod
    def _get_items_in_direction(board, pos, strength, x_dir, y_dir, items):
        """Collect distinct items along one blast ray into `items`."""
        if strength <= 0 or not utility.position_on_board(board, pos):
            return
        x, y = pos
        next_x = x + x_dir
        next_y = y + y_dir
        if not utility.position_on_board(board, (next_x, next_y)):
            return

        item = board[(next_x, next_y)]
        # Debug scaffolding kept from the original; narrowed from a bare
        # `except:` so KeyboardInterrupt/SystemExit are not swallowed.
        try:
            if type(item) == tuple:
                print(item)
            if item not in items:
                items.append(item)
        except Exception:
            if type(item) == tuple:
                print(item)
            print(item, items)

        if utility.position_in_items(board, (next_x, next_y), [constants.Item.Rigid, constants.Item.Wood]):
            return
        EnvSimulator._get_items_in_direction(board, (next_x, next_y), strength - 1, x_dir, y_dir, items)

    @staticmethod
    def get_game_state(game_data):
        """Return (serialized snapshot, done) — pickling is used for cheap save/restore."""
        return cPickle.dumps(game_data), EnvSimulator.get_done(game_data)

    @staticmethod
    def get_game_data(game_state):
        """Restore a GameData snapshot produced by get_game_state."""
        return cPickle.loads(game_state)
class GameData:
    """Plain attribute container for the simulation state.

    Attributes (board, board_size, agents, bombs, flames, items, step_count,
    max_steps, game_type, simulation_bomb_life, done) are attached dynamically
    by EnvSimulator.get_initial_game_data and mutated by EnvSimulator.act.
    """
    pass
| [
"copy.deepcopy",
"_pickle.loads",
"pommerman.forward_model.ForwardModel.step",
"_pickle.dumps",
"numpy.ones",
"pommerman.forward_model.ForwardModel.get_done",
"pommerman.utility.get_direction",
"numpy.where",
"pommerman.constants.InvalidAction",
"pommerman.constants.Action",
"pommerman.utility.p... | [((1926, 2053), 'pommerman.forward_model.ForwardModel.get_done', 'forward_model.ForwardModel.get_done', (['game_data.agents', 'game_data.step_count', 'game_data.max_steps', 'game_data.game_type', 'None'], {}), '(game_data.agents, game_data.step_count,\n game_data.max_steps, game_data.game_type, None)\n', (1961, 2053), False, 'from pommerman import forward_model\n'), ((4245, 4269), 'copy.deepcopy', 'copy.deepcopy', (['game_data'], {}), '(game_data)\n', (4258, 4269), False, 'import copy\n'), ((7430, 7561), 'pommerman.forward_model.ForwardModel.step', 'forward_model.ForwardModel.step', (['actions', 'game_data.board', 'game_data.agents', 'game_data.bombs', 'game_data.items', 'game_data.flames'], {}), '(actions, game_data.board, game_data.agents,\n game_data.bombs, game_data.items, game_data.flames)\n', (7461, 7561), False, 'from pommerman import forward_model\n'), ((8023, 8150), 'pommerman.forward_model.ForwardModel.get_done', 'forward_model.ForwardModel.get_done', (['game_data.agents', 'game_data.step_count', 'game_data.max_steps', 'game_data.game_type', 'None'], {}), '(game_data.agents, game_data.step_count,\n game_data.max_steps, game_data.game_type, None)\n', (8058, 8150), False, 'from pommerman import forward_model\n'), ((8881, 8904), 'numpy.where', 'np.where', (['(board == item)'], {}), '(board == item)\n', (8889, 8904), True, 'import numpy as np\n'), ((12105, 12126), 'copy.deepcopy', 'copy.deepcopy', (['board1'], {}), '(board1)\n', (12118, 12126), False, 'import copy\n'), ((12144, 12165), 'copy.deepcopy', 'copy.deepcopy', (['board2'], {}), '(board2)\n', (12157, 12165), False, 'import copy\n'), ((12788, 12817), 'numpy.where', 'np.where', (['(comparison is False)'], {}), '(comparison is False)\n', (12796, 12817), True, 'import numpy as np\n'), ((15567, 15643), 'pommerman.constants.InvalidAction', 'constants.InvalidAction', (['"""We did not receive a valid direction: """', 'direction'], {}), "('We did not receive a valid direction: ', 
direction)\n", (15590, 15643), False, 'from pommerman import constants\n'), ((16916, 17015), 'pommerman.utility.position_in_items', 'utility.position_in_items', (['board', '(next_x, next_y)', '[constants.Item.Rigid, constants.Item.Wood]'], {}), '(board, (next_x, next_y), [constants.Item.Rigid,\n constants.Item.Wood])\n', (16941, 17015), False, 'from pommerman import utility\n'), ((18334, 18433), 'pommerman.utility.position_in_items', 'utility.position_in_items', (['board', '(next_x, next_y)', '[constants.Item.Rigid, constants.Item.Wood]'], {}), '(board, (next_x, next_y), [constants.Item.Rigid,\n constants.Item.Wood])\n', (18359, 18433), False, 'from pommerman import utility\n'), ((18856, 18881), '_pickle.loads', 'cPickle.loads', (['game_state'], {}), '(game_state)\n', (18869, 18881), True, 'import _pickle as cPickle\n'), ((890, 932), 'pommerman.characters.Bomber', 'characters.Bomber', (['id', 'game_data.game_type'], {}), '(id, game_data.game_type)\n', (907, 932), False, 'from pommerman import characters\n'), ((10246, 10267), 'copy.deepcopy', 'copy.deepcopy', (['board1'], {}), '(board1)\n', (10259, 10267), False, 'import copy\n'), ((10289, 10310), 'copy.deepcopy', 'copy.deepcopy', (['board2'], {}), '(board2)\n', (10302, 10310), False, 'import copy\n'), ((14558, 14585), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (14574, 14585), False, 'from pommerman import constants\n'), ((16834, 16884), 'pommerman.utility.position_on_board', 'utility.position_on_board', (['board', '(next_x, next_y)'], {}), '(board, (next_x, next_y))\n', (16859, 16884), False, 'from pommerman import utility\n'), ((17956, 18006), 'pommerman.utility.position_on_board', 'utility.position_on_board', (['board', '(next_x, next_y)'], {}), '(board, (next_x, next_y))\n', (17981, 18006), False, 'from pommerman import utility\n'), ((18685, 18709), '_pickle.dumps', 'cPickle.dumps', (['game_data'], {}), '(game_data)\n', (18698, 18709), True, 'import _pickle as 
cPickle\n'), ((8574, 8607), 'numpy.ones', 'np.ones', (['(board_size, board_size)'], {}), '((board_size, board_size))\n', (8581, 8607), True, 'import numpy as np\n'), ((14758, 14785), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (14774, 14785), False, 'from pommerman import constants\n'), ((16704, 16744), 'pommerman.utility.position_on_board', 'utility.position_on_board', (['board', '(x, y)'], {}), '(board, (x, y))\n', (16729, 16744), False, 'from pommerman import utility\n'), ((17810, 17847), 'pommerman.utility.position_on_board', 'utility.position_on_board', (['board', 'pos'], {}), '(board, pos)\n', (17835, 17847), False, 'from pommerman import utility\n'), ((3381, 3420), 'pommerman.utility.get_direction', 'utility.get_direction', (['old_pos', 'new_pos'], {}), '(old_pos, new_pos)\n', (3402, 3420), False, 'from pommerman import utility\n'), ((14968, 14995), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (14984, 14995), False, 'from pommerman import constants\n'), ((15170, 15197), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (15186, 15197), False, 'from pommerman import constants\n'), ((5809, 5838), 'copy.deepcopy', 'copy.deepcopy', (['save_game_data'], {}), '(save_game_data)\n', (5822, 5838), False, 'import copy\n'), ((15384, 15411), 'pommerman.constants.Action', 'constants.Action', (['direction'], {}), '(direction)\n', (15400, 15411), False, 'from pommerman import constants\n')] |
#coding=utf-8
import numpy as np
import bisect
class Spline:
    """Natural cubic spline interpolator.

    Fits piecewise cubics S_i(t) = a_i + b_i*dx + c_i*dx**2 + d_i*dx**3 through
    the knots (x, y), with zero second derivative at both boundaries.
    """

    def __init__(self, x, y):
        self.a, self.b, self.c, self.d = [], [], [], []
        self.x = x
        self.y = y

        self.nx = len(x)  # number of knots
        h = np.diff(x)

        # coefficient a_i is simply y_i
        self.a = [iy for iy in y]

        # solve the tridiagonal system A*m = B (second-derivative values)
        A = self.__calc_A(h)
        B = self.__calc_B(h)
        self.m = np.linalg.solve(A, B)
        self.c = self.m / 2.0

        # back out b and d from a, c and the knot spacing
        for i in range(self.nx - 1):
            self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
            tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * (self.c[i + 1] + 2.0 * self.c[i]) / 3.0
            self.b.append(tb)

    def calc(self, t):
        """Evaluate the spline at position t.

        Returns None when t lies outside [x[0], x[-1]].
        """
        if t < self.x[0]:
            return None
        elif t > self.x[-1]:
            return None

        i = self.__search_index(t)
        # BUG FIX: clamp to the last segment so t == x[-1] evaluates S_{nx-2}
        # instead of indexing past the end of b/c/d (IndexError before).
        if i > self.nx - 2:
            i = self.nx - 2
        dx = t - self.x[i]
        result = self.a[i] + self.b[i] * dx + \
            self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0

        return result

    def __search_index(self, x):
        # Index of the segment whose left knot is <= x.
        return bisect.bisect(self.x, x) - 1

    def __calc_A(self, h):
        """Build the left-hand-side matrix A of the tridiagonal system
        (natural boundary conditions on the first and last rows)."""
        A = np.zeros((self.nx, self.nx))
        A[0, 0] = 1.0
        for i in range(self.nx - 1):
            if i != (self.nx - 2):
                A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])
            A[i + 1, i] = h[i]
            A[i, i + 1] = h[i]

        A[0, 1] = 0.0
        A[self.nx - 1, self.nx - 2] = 0.0
        A[self.nx - 1, self.nx - 1] = 1.0
        return A

    def __calc_B(self, h):
        """Build the right-hand-side vector B of the tridiagonal system."""
        B = np.zeros(self.nx)
        for i in range(self.nx - 2):
            B[i + 1] = 6.0 * (self.a[i + 2] - self.a[i + 1]) / h[i + 1] - 6.0 * (self.a[i + 1] - self.a[i]) / h[i]
        return B
| [
"numpy.zeros",
"numpy.diff",
"numpy.linalg.solve",
"bisect.bisect"
] | [((271, 281), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (278, 281), True, 'import numpy as np\n'), ((451, 472), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'B'], {}), '(A, B)\n', (466, 472), True, 'import numpy as np\n'), ((1334, 1362), 'numpy.zeros', 'np.zeros', (['(self.nx, self.nx)'], {}), '((self.nx, self.nx))\n', (1342, 1362), True, 'import numpy as np\n'), ((1794, 1811), 'numpy.zeros', 'np.zeros', (['self.nx'], {}), '(self.nx)\n', (1802, 1811), True, 'import numpy as np\n'), ((1212, 1236), 'bisect.bisect', 'bisect.bisect', (['self.x', 'x'], {}), '(self.x, x)\n', (1225, 1236), False, 'import bisect\n')] |
import os
import h5py
import numpy as np
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (
AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D)
from tensorflow.keras.models import Sequential
from image_analogy import img_utils
def img_from_vgg(x):
    '''Decondition an image from the VGG16 model.

    Moves channels last, adds the BGR ImageNet means back (in place on the
    transposed view), and flips BGR -> RGB.
    '''
    x = x.transpose((1, 2, 0))
    # Undo the per-channel mean subtraction applied by img_to_vgg.
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        x[:, :, channel] += mean
    # BGR -> RGB
    return x[:, :, ::-1]
def img_to_vgg(x):
    '''Condition an image for use with the VGG16 model.

    Converts to float64, flips RGB -> BGR, subtracts the ImageNet channel
    means, and moves channels first. Returns a new array (input untouched).
    '''
    # BUG FIX: np.float was removed in NumPy 1.24; float64 is what it aliased.
    x = x.astype(np.float64)
    x = x[:, :, ::-1]  # to BGR
    x[:, :, 0] -= 103.939
    x[:, :, 1] -= 116.779
    x[:, :, 2] -= 123.68
    x = x.transpose((2, 0, 1))
    return x
def get_model(img_width, img_height, weights_path='vgg16_weights.h5', pool_mode='avg'):
    '''Build the VGG16 convolutional stack and load pretrained weights.

    Args:
        img_width, img_height: spatial size of the channels-first input.
        weights_path: path to the VGG16 HDF5 weight file (Caffe/Theano layout).
        pool_mode: 'avg' or 'max' pooling between convolution blocks.

    Returns:
        A Keras Sequential model with ImageNet weights loaded into the
        convolution layers (fully-connected layers are omitted).
    '''
    assert pool_mode in ('avg', 'max'), '`pool_mode` must be "avg" or "max"'
    if pool_mode == 'avg':
        pool_class = AveragePooling2D
    else:
        pool_class = MaxPooling2D
    pool_pad_mode = 'valid'
    conv_pad_mode = 'valid'

    model = Sequential()

    def add_conv(filters, name):
        # Every conv is preceded by explicit 1-pixel zero padding ('valid' conv).
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(filters, (3, 3), activation='relu', padding=conv_pad_mode,
                                name=name, kernel_initializer="zeros", bias_initializer="zeros"))

    def add_pool():
        model.add(pool_class((2, 2), strides=(2, 2), padding=pool_pad_mode))

    # Block 1 — the first padding layer also declares the input shape.
    model.add(ZeroPadding2D((1, 1), input_shape=(3, img_height, img_width)))
    model.add(Convolution2D(64, (3, 3), activation='relu', padding=conv_pad_mode,
                            name='block1_conv1', kernel_initializer="zeros", bias_initializer="zeros"))
    add_conv(64, 'block1_conv2')
    add_pool()
    # Block 2
    add_conv(128, 'block2_conv1')
    add_conv(128, 'block2_conv2')
    add_pool()
    # Block 3
    add_conv(256, 'block3_conv1')
    add_conv(256, 'block3_conv2')
    add_conv(256, 'block3_conv3')
    add_pool()
    # Block 4
    add_conv(512, 'block4_conv1')
    add_conv(512, 'block4_conv2')
    add_conv(512, 'block4_conv3')
    add_pool()
    # Block 5
    add_conv(512, 'block5_conv1')
    add_conv(512, 'block5_conv2')
    add_conv(512, 'block5_conv3')
    add_pool()

    # load the weights of the VGG16 networks
    # (trained on ImageNet, won the ILSVRC competition in 2014)
    assert os.path.exists(weights_path), 'Model weights not found (see "--vgg-weights" parameter).'
    # BUG FIX: the original never closed the HDF5 file (`f.close()` was
    # commented out); the context manager guarantees closure.
    with h5py.File(weights_path, 'r') as f:
        for k in range(f.attrs['nb_layers']):
            if k >= len(model.layers):
                # we don't look at the last (fully-connected) layers in the savefile
                break
            g = f['layer_{}'.format(k)]
            weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
            layer = model.layers[k]
            if isinstance(layer, Convolution2D):
                # Convert Caffe-style kernels: flip spatially, then reshape for Keras.
                weights[0] = np.array(weights[0])[:, :, ::-1, ::-1]
                weights[0] = img_utils.reshape_weights(weights[0])
            layer.set_weights(weights)
    return model
| [
"image_analogy.img_utils.reshape_weights",
"h5py.File",
"os.path.exists",
"tensorflow.keras.layers.ZeroPadding2D",
"numpy.array",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Convolution2D"
] | [((1101, 1113), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1111, 1113), False, 'from tensorflow.keras.models import Sequential\n'), ((4385, 4413), 'os.path.exists', 'os.path.exists', (['weights_path'], {}), '(weights_path)\n', (4399, 4413), False, 'import os\n'), ((4482, 4505), 'h5py.File', 'h5py.File', (['weights_path'], {}), '(weights_path)\n', (4491, 4505), False, 'import h5py\n'), ((1128, 1189), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {'input_shape': '(3, img_height, img_width)'}), '((1, 1), input_shape=(3, img_height, img_width))\n', (1141, 1189), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((1205, 1352), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block1_conv1"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(64, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block1_conv1', kernel_initializer='zeros', bias_initializer='zeros')\n", (1218, 1352), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((1363, 1384), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1376, 1384), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((1400, 1547), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block1_conv2"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(64, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block1_conv2', kernel_initializer='zeros', bias_initializer='zeros')\n", (1413, 1547), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), 
((1632, 1653), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1645, 1653), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((1669, 1817), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block2_conv1"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(128, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block2_conv1', kernel_initializer='zeros', bias_initializer='zeros')\n", (1682, 1817), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((1828, 1849), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1841, 1849), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((1865, 2013), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block2_conv2"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(128, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block2_conv2', kernel_initializer='zeros', bias_initializer='zeros')\n", (1878, 2013), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2098, 2119), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2111, 2119), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2135, 2283), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block3_conv1"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(256, (3, 3), 
activation='relu', padding=conv_pad_mode, name=\n 'block3_conv1', kernel_initializer='zeros', bias_initializer='zeros')\n", (2148, 2283), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2294, 2315), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2307, 2315), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2331, 2479), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block3_conv2"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(256, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block3_conv2', kernel_initializer='zeros', bias_initializer='zeros')\n", (2344, 2479), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2490, 2511), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2503, 2511), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2527, 2675), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block3_conv3"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(256, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block3_conv3', kernel_initializer='zeros', bias_initializer='zeros')\n", (2540, 2675), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2760, 2781), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2773, 2781), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2797, 2945), 
'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block4_conv1"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(512, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block4_conv1', kernel_initializer='zeros', bias_initializer='zeros')\n", (2810, 2945), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2956, 2977), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2969, 2977), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((2993, 3141), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block4_conv2"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(512, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block4_conv2', kernel_initializer='zeros', bias_initializer='zeros')\n", (3006, 3141), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((3152, 3173), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (3165, 3173), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((3189, 3337), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block4_conv3"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(512, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block4_conv3', kernel_initializer='zeros', bias_initializer='zeros')\n", (3202, 3337), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), 
((3422, 3443), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (3435, 3443), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((3459, 3607), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block5_conv1"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(512, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block5_conv1', kernel_initializer='zeros', bias_initializer='zeros')\n", (3472, 3607), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((3618, 3639), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (3631, 3639), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((3655, 3803), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block5_conv2"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(512, (3, 3), activation='relu', padding=conv_pad_mode, name=\n 'block5_conv2', kernel_initializer='zeros', bias_initializer='zeros')\n", (3668, 3803), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((3814, 3835), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (3827, 3835), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((3851, 3999), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': 'conv_pad_mode', 'name': '"""block5_conv3"""', 'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""'}), "(512, (3, 3), 
activation='relu', padding=conv_pad_mode, name=\n 'block5_conv3', kernel_initializer='zeros', bias_initializer='zeros')\n", (3864, 3999), False, 'from tensorflow.keras.layers import AveragePooling2D, Convolution2D, MaxPooling2D, ZeroPadding2D\n'), ((4965, 5002), 'image_analogy.img_utils.reshape_weights', 'img_utils.reshape_weights', (['weights[0]'], {}), '(weights[0])\n', (4990, 5002), False, 'from image_analogy import img_utils\n'), ((4901, 4921), 'numpy.array', 'np.array', (['weights[0]'], {}), '(weights[0])\n', (4909, 4921), True, 'import numpy as np\n')] |
import numpy as np
from collections.abc import Iterable
from typing import Tuple
def fmt_row(width, row):
    """Render *row* as a ' | '-separated line with each cell padded to *width*."""
    return " | ".join(fmt_item(cell, width) for cell in row)

def fmt_item(x, l):
    """Right-justify *x* in a field of width *l*.

    0-d numpy arrays are unwrapped to scalars and floats use '%g' formatting.
    """
    if isinstance(x, np.ndarray):
        assert x.ndim == 0
        x = x.item()
    rep = "%g" % x if isinstance(x, float) else str(x)
    return rep.rjust(l)
def get_stats(loss, predictions, labels):
    """Return (loss value, error rate) for a batch of torch predictions and labels."""
    predicted_classes = np.argmax(predictions.cpu().data.numpy(), 1)
    error_rate = np.mean(predicted_classes != labels.cpu().data.numpy())
    return loss.item(), error_rate
def print_stats(epoch, avg_loss, avg_error, num_batches, time_duration):
    """Print one epoch's averaged training statistics as a table row."""
    row = [epoch + 1, avg_loss / num_batches, avg_error / num_batches,
           time_duration]
    print(fmt_row(10, row))
def print_header():
    """Print the column headers matching print_stats' row layout."""
    columns = ["Epoch", "Train Loss", "Train Error", "Epoch Time"]
    print(fmt_row(10, columns))
def exclude_from_dict(d, keys):
    """Return a copy of *d* without the entries whose key appears in *keys*."""
    return {k: v for k, v in d.items() if k not in keys}
def flatten(l, ignored_values=(), depth: int = np.iinfo(np.int32).max):
    """Lazily flatten nested iterables up to *depth* levels.

    Strings and bytes are treated as atoms, and any element equal to a
    member of *ignored_values* is dropped.  ``depth == 0`` yields the
    elements of *l* unchanged (no filtering, matching the original code).

    Fixed: the default for *ignored_values* was a mutable list, which is a
    classic shared-mutable-default pitfall; an immutable tuple behaves the
    same for the membership tests done here.
    """
    if depth == 0:
        yield from l
    else:
        for el in l:
            if isinstance(el, Iterable) and not isinstance(el, (str, bytes)):
                for sub in flatten(el, ignored_values, depth - 1):
                    if sub not in ignored_values:
                        yield sub
            elif el not in ignored_values:
                yield el
def array_shape(a) -> Tuple[int, ...]:
    """Recursively compute the shape of a nested sequence (like ndarray.shape).

    Only the first element of each level is inspected, so ragged inputs
    report the shape of their leading branch.

    Fixed: an empty sequence now safely yields ``(0,)`` instead of raising
    IndexError on ``a[0]``.
    """
    if len(a) > 0 and isinstance(a[0], Iterable):
        return (len(a), *array_shape(a[0]))
    return (len(a),)
def static_class(cls):
    """Class decorator: invoke ``cls._static_init_()`` once at definition time, if present."""
    initializer = getattr(cls, "_static_init_", None)
    if initializer:
        initializer()
    return cls
| [
"numpy.iinfo"
] | [((1038, 1056), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (1046, 1056), True, 'import numpy as np\n')] |
import numpy as np
from . import itrainer
from .. import network
from typing import Callable, List, Tuple
class StochasticGradientDescent(itrainer.ITrainer):
    """Minibatch SGD trainer that estimates gradients by central finite differences."""

    @staticmethod
    def numerical_gradient(f: Callable, x: np.array, delta: float = 1e-4) -> np.array:
        """Central-difference gradient of *f* at *x* (x is temporarily mutated in place)."""
        grad = np.zeros_like(x)
        it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
        while not it.finished:
            idx = it.multi_index
            original = float(x[idx])
            x[idx] = original + delta
            f_plus = f(x)
            x[idx] = original - delta
            f_minus = f(x)
            x[idx] = original
            grad[idx] = (f_plus - f_minus) / (2 * delta)
            it.iternext()
        return grad

    def __init__(self, learning_rate: float = 1e-1, batch_size: int = 100):
        self.learning_rate = learning_rate  # step size for parameter updates
        self.batch_size = batch_size        # minibatch size per iteration

    def train(self, network: network.INetwork, train_image: np.array, train_label: np.array):
        """Run one epoch of minibatch SGD over the training set."""
        total_iters = int(train_image.shape[0] / self.batch_size)
        for iteration in range(max(1, total_iters)):
            print(
                f'iteration: {iteration} / {int(train_image.shape[0] / self.batch_size)}')
            mask = np.random.choice(train_image.shape[0], self.batch_size)
            batch_image = train_image[mask]
            batch_label = train_label[mask]
            grad_weights, grad_biases = self.get_gradients(
                network, batch_image, batch_label)
            for layer, grad_weight, grad_bias in zip(network.layers, grad_weights, grad_biases):
                layer.weight -= self.learning_rate * grad_weight
                layer.bias -= self.learning_rate * grad_bias
            print(network.get_loss(batch_image, batch_label))

    def get_gradients(self, network: network.INetwork, image: np.array, label: np.array) -> Tuple[List[np.array], List[np.array]]:
        """Numerically estimate the loss gradient of every layer's weight and bias."""
        def loss_fn(_):
            return network.get_loss(image, label)
        grad_weights = [StochasticGradientDescent.numerical_gradient(loss_fn, layer.weight)
                        for layer in network.layers]
        grad_biases = [StochasticGradientDescent.numerical_gradient(loss_fn, layer.bias)
                       for layer in network.layers]
        return grad_weights, grad_biases
| [
"numpy.nditer",
"numpy.zeros_like",
"numpy.random.choice"
] | [((280, 296), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (293, 296), True, 'import numpy as np\n'), ((311, 370), 'numpy.nditer', 'np.nditer', (['x'], {'flags': "['multi_index']", 'op_flags': "['readwrite']"}), "(x, flags=['multi_index'], op_flags=['readwrite'])\n", (320, 370), True, 'import numpy as np\n'), ((1201, 1256), 'numpy.random.choice', 'np.random.choice', (['train_image.shape[0]', 'self.batch_size'], {}), '(train_image.shape[0], self.batch_size)\n', (1217, 1256), True, 'import numpy as np\n')] |
# Copyright 2019 GreenWaves Technologies, SAS
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
from .utils import pad, srange
LOG = logging.getLogger("nntool." + __name__)
# pylint: disable=too-many-arguments, too-many-locals
def av_pool(params, in_dims, out_dims, in_tensor, qrec=None):
    """Reference average pooling over a CHW tensor.

    With *qrec* the accumulation runs in int32 with a Q16 reciprocal of the
    window size and the result is shifted back down and clipped; otherwise
    plain float arithmetic is used.  Slicing goes through the project
    `srange` helper, and `pad` applies the configured padding.
    """
    window = params.filter.h * params.filter.w
    if qrec:
        scale = (1 << 16) // window   # Q16 fixed-point 1/window
        dtype = np.int32
    else:
        scale = 1.0 / window
        dtype = None

    result = np.zeros(out_dims.shape, dtype=dtype)
    padded = pad(in_tensor, in_dims, params.padding, params.pad_type)
    h_starts = range(0, in_dims.h - params.filter.h + params.padding.h + 1,
                     params.stride.h)
    w_starts = range(0, in_dims.w - params.filter.w + params.padding.w + 1,
                     params.stride.w)
    for chan in range(out_dims.c):
        for out_h, h0 in enumerate(h_starts):
            for out_w, w0 in enumerate(w_starts):
                dst = srange(out_dims, c=chan, h=out_h, w=out_w)
                src = srange(in_dims,
                             c=[chan, chan + 1, 1],
                             h=[h0, h0 + params.filter.h, 1],
                             w=[w0, w0 + params.filter.w, 1])
                # accumulate - potentially with different Q
                acc = np.sum(padded[src], dtype=dtype)
                result[dst] = np.multiply(acc, scale, dtype=dtype)
    if qrec:
        return qrec.out_qs[0].clip(result >> 16)
    return result
def max_pool(params, in_dims, out_dims, in_tensor, qrec=None):
    """Reference max pooling over a CHW tensor.

    The output dtype follows qrec.out_qs[0].dtype when quantized, otherwise
    numpy's default.  Padding (via the project `pad` helper) is applied
    only when the configured padding is non-zero.
    """
    dtype = qrec.out_qs[0].dtype if qrec else None
    result = np.zeros(out_dims.shape, dtype=dtype)
    if params.padding.h + params.padding.w > 0:
        in_tensor = pad(in_tensor, in_dims, params.padding, params.pad_type)
    h_starts = range(0, in_dims.h - params.filter.h + params.padding.h + 1,
                     params.stride.h)
    w_starts = range(0, in_dims.w - params.filter.w + params.padding.w + 1,
                     params.stride.w)
    for chan in range(out_dims.c):
        for out_h, h0 in enumerate(h_starts):
            for out_w, w0 in enumerate(w_starts):
                dst = srange(out_dims, c=chan, h=out_h, w=out_w)
                src = srange(in_dims,
                             c=[chan, chan + 1, 1],
                             h=[h0, h0 + params.filter.h, 1],
                             w=[w0, w0 + params.filter.w, 1])
                # window maximum, viewed as a plain ndarray to drop subclasses
                result[dst] = np.max(in_tensor[src].view(np.ndarray))
    return result
| [
"numpy.multiply",
"numpy.zeros",
"numpy.sum",
"logging.getLogger"
] | [((661, 700), 'logging.getLogger', 'logging.getLogger', (["('nntool.' + __name__)"], {}), "('nntool.' + __name__)\n", (678, 700), False, 'import logging\n'), ((1071, 1108), 'numpy.zeros', 'np.zeros', (['out_dims.shape'], {'dtype': 'dtype'}), '(out_dims.shape, dtype=dtype)\n', (1079, 1108), True, 'import numpy as np\n'), ((2369, 2406), 'numpy.zeros', 'np.zeros', (['out_dims.shape'], {'dtype': 'dtype'}), '(out_dims.shape, dtype=dtype)\n', (2377, 2406), True, 'import numpy as np\n'), ((1932, 1977), 'numpy.sum', 'np.sum', (['in_tensor[in_slice_args]'], {'dtype': 'dtype'}), '(in_tensor[in_slice_args], dtype=dtype)\n', (1938, 1977), True, 'import numpy as np\n'), ((2002, 2045), 'numpy.multiply', 'np.multiply', (['sum_', 'pool_factor'], {'dtype': 'dtype'}), '(sum_, pool_factor, dtype=dtype)\n', (2013, 2045), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import numpy as np
def _mZdif(data_difuse, cbus, Sb):
bus = cbus.tbus
nP = cbus.nP
busP = cbus.mbusp
zdif = np.zeros([nP, 3])
zcent = np.zeros([nP, 1])
auxDeltaZ = np.zeros([nP, 3])
for i in range(bus):
for j in range(nP):
if(data_difuse[i, 0] == busP[0, j]):
aux1 = np.matrix([data_difuse[i, 1], data_difuse[i, 2], data_difuse[i, 3]])
aux2 = np.matrix([data_difuse[i, 9], data_difuse[i, 8], data_difuse[i, 7]])
paux = np.subtract(aux1, aux2)/Sb
zdif[j, :] = paux
zcent[j, 0] = zdif[j, 1]
auxDeltaZ[j, :] = zdif[j, :] - zcent[j, 0]
class _CZdif:
def __init__(self, mzdif, mzcent, mdeltaz):
self.mzdif = mzdif
self.mzcent = mzcent
self.mdeltaz = mdeltaz
cdif = _CZdif(zdif, zcent, auxDeltaZ)
return cdif
def _matrixA(clines, cbus, data_lines):
Zaux = clines.mzauxlin
X_lines = clines.mxlin
lines = clines.nlines
busref = cbus.nbusref
nP = cbus.nP
mSens = np.zeros([lines, nP])
mposSens = np.zeros([lines, nP])
mnegSens = np.zeros([lines, nP])
posx = 0
posy = 0
textauxP = ""
for i in range(lines):
posx = int(data_lines[i, 0])
posy = int(data_lines[i, 1])
textauxP = textauxP + "_P" + str(int(posx)) + str(int(posy))
for j in range(nP):
if(posx != busref and posy != busref):
mSens[i, j] = (Zaux[posx-2, j] - Zaux[posy-2, j])/X_lines[posx-1, posy-1]
elif(posx == busref):
mSens[i, j] = (0-Zaux[posy-2, j])/X_lines[posx-1, posy-1]
elif(posy == busref):
mSens[i, j] = (Zaux[posx-2, j]-0)/X_lines[posx-1, posy-1]
for j in range(nP):
if(mSens[i, j] < 0):
mnegSens[i, j] = mSens[i, j]
else:
mposSens[i, j] = mSens[i, j]
textauxP = "Rows:" + textauxP
class _CSens:
def __init__(self, msens, msensn, msensp, txtpij):
self.msens = msens
self.msensn = msensn
self.msensp = msensp
self.txtpij = txtpij
csens = _CSens(mSens, mnegSens, mposSens, textauxP)
return csens
def _mXdif(zaux, cdif, nP):
auxDeltaZ = cdif.mdeltaz
zcent = cdif.mzcent
auxDeltaX = np.zeros([nP, 3])
#Radians
auxDeltaX = (np.matmul(zaux, auxDeltaZ)*180)/np.pi
auxXcent = (np.matmul(zaux, zcent)*180)/np.pi
auxXdif = auxDeltaX + auxXcent
class _CXdif:
def __init__(self, mdeltax, mxcent, mxdif):
self.mdeltax = mdeltax
self.mxcent = mxcent
self.mxdif = mxdif
cxdif = _CXdif(auxDeltaX, auxXcent, auxXdif)
return cxdif
def _Pdifuse(zdif, csens, nP, nlines):
mnegSens = csens.msensn
mposSens = csens.msensp
zmin = np.zeros([nP, 1])
zcent = np.zeros([nP, 1])
zmax = np.zeros([nP, 1])
pmin = np.zeros([nlines, 1])
pcent = np.zeros([nlines, 1])
pmax = np.zeros([nlines, 1])
zmin[:, 0] = zdif[:, 0]
zcent[:, 0] = zdif[:, 1]
zmax[:, 0] = zdif[:, 2]
pmin = np.matmul(mposSens, zmin) + np.matmul(mnegSens, zmax)
pcent = np.matmul(mposSens, zcent) + np.matmul(mnegSens, zcent)
pmax = np.matmul(mposSens, zmax) + np.matmul(mnegSens, zmin)
pdif = np.concatenate((pmin, pcent, pmax), axis=1)
return pdif
| [
"numpy.matrix",
"numpy.subtract",
"numpy.zeros",
"numpy.matmul",
"numpy.concatenate"
] | [((172, 189), 'numpy.zeros', 'np.zeros', (['[nP, 3]'], {}), '([nP, 3])\n', (180, 189), True, 'import numpy as np\n'), ((202, 219), 'numpy.zeros', 'np.zeros', (['[nP, 1]'], {}), '([nP, 1])\n', (210, 219), True, 'import numpy as np\n'), ((236, 253), 'numpy.zeros', 'np.zeros', (['[nP, 3]'], {}), '([nP, 3])\n', (244, 253), True, 'import numpy as np\n'), ((1127, 1148), 'numpy.zeros', 'np.zeros', (['[lines, nP]'], {}), '([lines, nP])\n', (1135, 1148), True, 'import numpy as np\n'), ((1164, 1185), 'numpy.zeros', 'np.zeros', (['[lines, nP]'], {}), '([lines, nP])\n', (1172, 1185), True, 'import numpy as np\n'), ((1201, 1222), 'numpy.zeros', 'np.zeros', (['[lines, nP]'], {}), '([lines, nP])\n', (1209, 1222), True, 'import numpy as np\n'), ((2403, 2420), 'numpy.zeros', 'np.zeros', (['[nP, 3]'], {}), '([nP, 3])\n', (2411, 2420), True, 'import numpy as np\n'), ((2915, 2932), 'numpy.zeros', 'np.zeros', (['[nP, 1]'], {}), '([nP, 1])\n', (2923, 2932), True, 'import numpy as np\n'), ((2945, 2962), 'numpy.zeros', 'np.zeros', (['[nP, 1]'], {}), '([nP, 1])\n', (2953, 2962), True, 'import numpy as np\n'), ((2974, 2991), 'numpy.zeros', 'np.zeros', (['[nP, 1]'], {}), '([nP, 1])\n', (2982, 2991), True, 'import numpy as np\n'), ((3003, 3024), 'numpy.zeros', 'np.zeros', (['[nlines, 1]'], {}), '([nlines, 1])\n', (3011, 3024), True, 'import numpy as np\n'), ((3037, 3058), 'numpy.zeros', 'np.zeros', (['[nlines, 1]'], {}), '([nlines, 1])\n', (3045, 3058), True, 'import numpy as np\n'), ((3070, 3091), 'numpy.zeros', 'np.zeros', (['[nlines, 1]'], {}), '([nlines, 1])\n', (3078, 3091), True, 'import numpy as np\n'), ((3391, 3434), 'numpy.concatenate', 'np.concatenate', (['(pmin, pcent, pmax)'], {'axis': '(1)'}), '((pmin, pcent, pmax), axis=1)\n', (3405, 3434), True, 'import numpy as np\n'), ((3193, 3218), 'numpy.matmul', 'np.matmul', (['mposSens', 'zmin'], {}), '(mposSens, zmin)\n', (3202, 3218), True, 'import numpy as np\n'), ((3221, 3246), 'numpy.matmul', 'np.matmul', (['mnegSens', 'zmax'], 
{}), '(mnegSens, zmax)\n', (3230, 3246), True, 'import numpy as np\n'), ((3259, 3285), 'numpy.matmul', 'np.matmul', (['mposSens', 'zcent'], {}), '(mposSens, zcent)\n', (3268, 3285), True, 'import numpy as np\n'), ((3288, 3314), 'numpy.matmul', 'np.matmul', (['mnegSens', 'zcent'], {}), '(mnegSens, zcent)\n', (3297, 3314), True, 'import numpy as np\n'), ((3326, 3351), 'numpy.matmul', 'np.matmul', (['mposSens', 'zmax'], {}), '(mposSens, zmax)\n', (3335, 3351), True, 'import numpy as np\n'), ((3354, 3379), 'numpy.matmul', 'np.matmul', (['mnegSens', 'zmin'], {}), '(mnegSens, zmin)\n', (3363, 3379), True, 'import numpy as np\n'), ((2451, 2477), 'numpy.matmul', 'np.matmul', (['zaux', 'auxDeltaZ'], {}), '(zaux, auxDeltaZ)\n', (2460, 2477), True, 'import numpy as np\n'), ((2505, 2527), 'numpy.matmul', 'np.matmul', (['zaux', 'zcent'], {}), '(zaux, zcent)\n', (2514, 2527), True, 'import numpy as np\n'), ((379, 447), 'numpy.matrix', 'np.matrix', (['[data_difuse[i, 1], data_difuse[i, 2], data_difuse[i, 3]]'], {}), '([data_difuse[i, 1], data_difuse[i, 2], data_difuse[i, 3]])\n', (388, 447), True, 'import numpy as np\n'), ((471, 539), 'numpy.matrix', 'np.matrix', (['[data_difuse[i, 9], data_difuse[i, 8], data_difuse[i, 7]]'], {}), '([data_difuse[i, 9], data_difuse[i, 8], data_difuse[i, 7]])\n', (480, 539), True, 'import numpy as np\n'), ((563, 586), 'numpy.subtract', 'np.subtract', (['aux1', 'aux2'], {}), '(aux1, aux2)\n', (574, 586), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Copyright 2019 <NAME>.
#
# This file is part of Mi3-GPU.
#
# Mi3-GPU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Mi3-GPU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mi3-GPU. If not, see <http://www.gnu.org/licenses/>.
#
#Contact: allan.haldane _AT_ gmail.com
import argparse
import atexit
import datetime
import errno
import glob
import json
import os
import re
import signal
import socket
import sys
import time

import numpy as np
from numpy.random import randint, rand
from scipy.special import logsumexp
import pyopencl as cl
import pyopencl.array as cl_array

from utils.seqload import loadSeqs, writeSeqs
from utils.changeGauge import fieldlessGaugeEven
from utils import printsome, getLq, unimarg
from mcmcGPU import setup_GPU_context, initGPU, wgsize_heuristic, printGPUs
import NewtonSteps
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
from node_manager import GPU_node
MPI = None
def setup_MPI():
    """Import mpi4py and publish the MPI globals used throughout the module.

    When only a single rank is present, MPI is reset to None so the rest of
    the code takes the non-MPI path; otherwise the worker/controller helper
    classes are imported and the rank is recorded.
    """
    global MPI, mpi_comm, mpi_rank
    global MPI_multinode_controller, MPI_GPU_node, MPI_worker

    from mpi4py import MPI
    mpi_comm = MPI.COMM_WORLD
    if mpi_comm.Get_size() > 1:
        mpi_rank = mpi_comm.Get_rank()
        from mpi_manager import (MPI_multinode_controller,
                                 MPI_GPU_node, MPI_worker)
    else:
        MPI = None
################################################################################
# Set up enviroment and some helper functions
progname = 'Mi3.py'

def mkdir_p(path):
    """Create *path* (and parents) like ``mkdir -p``; an existing dir is fine.

    Fixed/simplified: uses ``os.makedirs(exist_ok=True)``, which matches the
    old EEXIST/isdir dance exactly — it still raises if *path* exists but is
    not a directory.
    """
    os.makedirs(path, exist_ok=True)
# Absolute directory containing this script, used to locate the OpenCL kernel
# source (mcmc.cl) regardless of the current working directory.
scriptPath = os.path.dirname(os.path.realpath(__file__))
scriptfile = os.path.join(scriptPath, "mcmc.cl")
class attrdict(dict):
    """Dictionary with attribute-style read access.

    Missing keys read as None, so option lookups can be chained without
    try/except.  Names starting with '_' raise AttributeError as usual so
    protocols like copy/pickle keep working.
    """
    def __getattr__(self, attr):
        if attr.startswith('_'):
            # Fixed: the old code called super().__getattr__, which does not
            # exist on dict/object and produced a confusing secondary
            # AttributeError; raise a clean one directly instead.
            raise AttributeError(attr)
        try:
            return dict.__getitem__(self, attr)
        except KeyError:
            return None
#identical calculation as CL kernel, but with high precision (to check fp error)
def getEnergiesMultiPrec(s, couplings):
    """High-precision (mpmath, 32 digits) Potts pair energies of sequences.

    Performs the identical computation to the CL kernel but in arbitrary
    precision, used to quantify floating-point error.  *s* is an (nseq, L)
    integer sequence array and *couplings* an (L*(L-1)/2, q*q) matrix.

    Fixed: L and q were previously read from globals that are never defined
    at module level; they are now derived from the argument shapes (each
    coupling row has q*q entries, one per residue pair — the q*s_i + s_j
    indexing below depends on that layout).
    """
    from mpmath import mpf, mp
    mp.dps = 32
    L = s.shape[1]
    q = int(round(len(couplings[0]) ** 0.5))
    couplings = [[mpf(float(x)) for x in r] for r in couplings]
    pairenergy = [mpf(0) for n in range(s.shape[0])]
    s = s.astype('i4')
    for n, (i, j) in enumerate((i, j) for i in range(L-1) for j in range(i+1, L)):
        r = couplings[n]
        cpl = (r[b] for b in (q*s[:, i] + s[:, j]))
        pairenergy = [x + c for x, c in zip(pairenergy, cpl)]
    return pairenergy
################################################################################
def optionRegistry():
    """Build the shared registry of command-line options.

    Returns a dict mapping option name -> argparse add_argument kwargs.
    Subcommands attach subsets of these options via addopt().
    """
    options = []
    add = lambda opt, **kwds: options.append((opt, kwds))

    # option used by both potts and sequence loaders, designed
    # to load in the output of a previous run
    add('init_model', default='independent',
        help=("One of 'zero', 'independent', or a directory name. Generates or "
              "loads 'alpha', 'couplings', 'seedseq' and 'seqs', if not "
              "otherwise supplied.") )
    add('outdir', default='output', help='Output Directory')
    add('finish', help='Dir. of an unfinished run to finish')
    #add('continue', help='Dir. of finished run, to start a new run from')
    add('config', #is_config_file_arg=True,
        help='config file to load arguments from')
    add('rngseed', type=np.uint32, help='random seed')

    # GPU options
    add('nwalkers', type=np.uint32,
        help="Number of MC walkers")
    add('nsteps', type=np.uint32, default=2048,
        help="number of mc steps per kernel call")
    add('wgsize', default=512, help="GPU workgroup size")
    add('gpus',
        help="GPUs to use (comma-sep list of platforms #s, eg '0,0')")
    add('profile', action='store_true',
        help="enable OpenCL profiling")
    add('nlargebuf', type=np.uint32, default=1,
        help='size of large seq buffer, in multiples of nwalkers')
    add('measurefperror', action='store_true',
        help="enable fp error calculation")

    # Newton options
    add('bimarg', required=True,
        help="Target bivariate marginals (npy file)")
    add('mcsteps', type=np.uint32, default=64,
        help="Number of rounds of MCMC generation")
    add('newtonsteps', default=1024, type=np.uint32,
        help="Initial number of newton steps per round.")
    add('newton_delta', default=32, type=np.uint32,
        help="Newton step number tuning scale")
    add('fracNeff', type=np.float32, default=0.9,
        help="stop coupling updates after Neff/N = fracNeff")
    add('gamma', type=np.float32, default=0.0004,
        help="Initial step size")
    add('damping', default=0.001, type=np.float32,
        help="Damping parameter")
    add('reg', default=None,
        help="regularization format")
    add('preopt', action='store_true',
        help="Perform a round of newton steps before first MCMC run")
    add('reseed',
        choices=['none', 'single_best', 'single_random', 'single_indep',
                 'independent', 'uniform', 'msa'],
        default='single_indep',
        help="Strategy to reset walkers after each MCMC round")
    add('seedmsa', default=None,
        help="seed used of reseed=msa")
    add('distribute_jstep', choices=['head_gpu', 'head_node', 'all'],
        default='all',
        help="how to split newton step computation across GPUs")

    # Potts options
    add('alpha', required=True,
        help="Alphabet, a sequence of letters")
    add('couplings',
        help="One of 'zero', 'independent', or a filename")
    add('L', help="sequence length", type=int)

    # Sequence options
    add('seedseq', help="Starting sequence. May be 'rand'")
    add('seqs', help="File containing sequences to pre-load to GPU")
    add('seqs_large', help="File containing sequences to pre-load to GPU")
    add('seqbimarg',
        help="bimarg used to generate independent model sequences")

    # Sampling Param
    add('equiltime', default='auto',
        help="Number of MC kernel calls to equilibrate")
    add('min_equil', default=64, type=int,
        help="minimum MC calls to equilibrate when using 'equiltime=auto'")
    add('trackequil', type=np.uint32, default=0,
        help='Save bimarg every TRACKEQUIL steps during equilibration')
    add('tempering',
        help='optional inverse Temperature schedule')
    add('nswaps_temp', type=np.uint32, default=128,
        help='optional number of pt swaps')

    return dict(options)
def addopt(parser, groupname, optstring):
    """Attach the named registry options (space-separated) to *parser*.

    When *groupname* is given the options are placed in a new argument
    group of that name; otherwise they go directly on the parser.
    """
    if groupname is None:
        target = parser
    else:
        target = parser.add_argument_group(groupname)
    for name in optstring.split():
        target.add_argument('--' + name, **addopt.options[name])
# Build the shared option registry once and attach it to addopt for lookup.
addopt.options = optionRegistry()
def requireargs(args, required):
    """Raise if any space-separated name in *required* is unset on *args*."""
    values = vars(args)
    for name in required.split():
        if values[name] is None:
            raise Exception("error: argument --{} is required".format(name))
def setup_seed(args, p, log):
    """Seed numpy's RNG and record the GPU rng seed in *p*.

    Uses --rngseed when supplied, otherwise draws 4 random bytes from the
    OS.  The GPU seed stored in p['rngseed'] is offset by one so the MCMC
    mutation stream differs from the numpy sequence-generation stream.
    """
    if args.rngseed is None:
        seed = np.frombuffer(os.urandom(4), dtype='u4')[0]
    else:
        seed = args.rngseed

    # seed the head-node numpy rng (used in sequence generation)
    np.random.seed(seed)
    p['rngseed'] = seed + 1
    log("Using random seed {}".format(p.rngseed))
    log("")
def describe_tempering(args, p, log):
    """Log a human-readable summary of the parallel-tempering setup, if any."""
    if p.tempering is None:
        return
    if len(p.tempering) == p.nwalkers:
        msg = ("The walkers are assigned temperatures from file {}"
               ).format(args.tempering)
    else:
        msg = ("The walkers are divided into {} temperature groups ({})"
               ).format(len(p.tempering), args.tempering)
    log(("Parallel tempering: {}, and neighbor temperatures are "
         "swapped {} times after every MCMC loop. The low-temperature "
         "B is {}").format(msg, p.nswaps, np.max(p.tempering)))
def print_node_startup(log):
    """Log hostname, start time, and (under PBS) the job id for this node."""
    log("Hostname:   {}".format(socket.gethostname()))
    log("Start Time: {}".format(datetime.datetime.now()))
    jobid = os.environ.get('PBS_JOBID')
    if jobid is not None:
        log("Job name:   {}".format(jobid))
def setup_exit_hook(log):
    """Register shutdown handlers: abort MPI peers and log the exit time.

    SIGTERM (e.g. from the scheduler) is converted to a normal exit(1) so
    the atexit hook still runs when the job is killed.
    """
    def _on_exit():
        if MPI:
            mpi_comm.Abort()
        log("Exited at {}".format(datetime.datetime.now()))

    atexit.register(_on_exit)
    signal.signal(signal.SIGTERM, lambda signum, frame: exit(1))
################################################################################
def divideWalkers(nwalkers, ngpus, log, wgsize=None):
    """Split *nwalkers* across *ngpus*; the last GPU takes the remainder.

    Logs a warning when the walker count is not a multiple of
    wgsize*ngpus, since that leaves idle work units on the device.
    """
    per_gpu = (nwalkers - 1)//ngpus + 1
    counts = [per_gpu] * (ngpus - 1)
    counts.append(nwalkers - (ngpus - 1)*per_gpu)
    if wgsize is not None and nwalkers % (ngpus*wgsize) != 0:
        log("Warning: number of MCMC walkers is not a multiple of "
            "wgsize*ngpus, so there are idle work units.")
    return counts
def worker_GPU_main():
    """Main loop for non-head MPI ranks: init local GPUs, then serve requests.

    NOTE: the bcast/gather/scatter sequence below must mirror
    setup_GPUs_MPI on the head node exactly — MPI collectives match by
    call order, not by name.
    """
    def log(*x):
        pass

    p = mpi_comm.bcast(None, root=0)
    # see setup_GPUs_MPI for head node setup
    clinfo, gpudevs, ptx = setup_GPU_context(scriptPath, scriptfile, p, log)
    mpi_comm.gather(len(gpudevs), root=0)
    gpu_ids = mpi_comm.scatter(None, root=0)
    gpus = [initGPU(id, clinfo, dev, nwalk, p, log)
            for dev,(id, nwalk) in zip(gpudevs, gpu_ids)]
    worker = MPI_worker(mpi_rank, gpus)
    worker.listen()
def setup_GPUs_MPI(p, log):
    """Head-node GPU setup across all MPI ranks.

    Broadcasts the parameters, gathers per-node GPU counts, scatters the
    (gpu id, walker count) assignments, and initializes the head node's
    own GPUs.  Returns an MPI_multinode_controller wrapping all nodes.

    NOTE: the collective call order here must mirror worker_GPU_main.
    """
    # setup context for head node
    clinfo, gpudevs, cllog = setup_GPU_context(scriptPath, scriptfile, p, log)

    # keep the generated/compiled kernel source for debugging
    with open(os.path.join(p.outdir, 'ptx'), 'wt') as f:
        f.write(cllog[1])
        f.write(cllog[0])

    # gather GPU setup info from all nodes
    mpi_comm.bcast(p, root=0)
    node_ngpus = mpi_comm.gather(len(gpudevs), root=0)
    ngpus = sum(node_ngpus)
    gpuwalkers = divideWalkers(p.nwalkers, ngpus, log, p.wgsize)
    gpu_param = list(enumerate(gpuwalkers))
    # partition the flat (id, nwalk) list into one sub-list per node
    gpu_param = [[gpu_param.pop(0) for i in range(n)] for n in node_ngpus]
    gpu_param = mpi_comm.scatter(gpu_param, root=0)
    log("Found {} GPUs over {} nodes".format(ngpus, len(node_ngpus)))
    log("Starting GPUs...")
    # initialize head node gpus
    headgpus = [initGPU(id, clinfo, dev, nwalk, p, log)
                for dev,(id, nwalk) in zip(gpudevs, gpu_param)]
    workers = ([GPU_node(headgpus)] +
               [MPI_GPU_node(r+1, n) for r, n in enumerate(node_ngpus[1:])])
    gpus = MPI_multinode_controller(workers)
    log('Running on GPUs:\n' +
        "\n".join('    {}   ({} walkers)'.format(n, nwalk)
                  for n, nwalk in zip(gpus.gpu_list, gpuwalkers)))
    return gpus
def setup_GPUs(p, log):
    """Initialize all OpenCL GPUs, delegating to the MPI path when running MPI.

    Returns a GPU_node managing the per-device contexts, with the walkers
    divided across devices.
    """
    if MPI:
        return setup_GPUs_MPI(p, log)

    clinfo, gpudevs, cllog = setup_GPU_context(scriptPath, scriptfile, p, log)

    # keep the generated/compiled kernel source for debugging
    with open(os.path.join(p.outdir, 'ptx'), 'wt') as f:
        f.write(cllog[1])
        f.write(cllog[0])

    ngpus = len(gpudevs)
    gpuwalkers = divideWalkers(p.nwalkers, ngpus, log, p.wgsize)
    log("Found {} GPUs".format(ngpus))
    log("GPU Initialization:")
    headgpus = [initGPU(gid, clinfo, dev, nwalk, p, log)
                for dev, (gid, nwalk) in zip(gpudevs, enumerate(gpuwalkers))]

    gpus = GPU_node(headgpus)
    log('Running on GPUs:\n' +
        "\n".join('    {}   ({} walkers)'.format(n, nwalk)
                  for n, nwalk in zip(gpus.gpu_list, gpuwalkers)))
    return gpus
################################################################################
def inverseIsing(orig_args, args, log):
    """CLI subcommand: quasi-Newton MCMC inverse Ising inference on the GPU.

    Parses the subcommand arguments, optionally resumes an unfinished run
    (--finish), sets up the parameter dict `p` and the GPUs, and hands off
    to NewtonSteps.newtonMCMC.

    Fixes: this function uses `json` and `re`, which were never imported at
    module level (NameError on --finish); and the old
    ``int(re.match('[0-9]*$', rundir).groups()) + 1`` was doubly broken
    (match() anchored at the start returns None for run dirs, and
    ``.groups()`` returns a tuple, which int() rejects).  It now extracts
    the trailing run number with re.search.
    """
    descr = ('Inverse Ising inference using a quasi-Newton MCMC algorithm '
             'on the GPU')
    parser = argparse.ArgumentParser(prog=progname + ' inverseIsing',
                                     description=descr)
    addopt(parser, 'GPU options', 'nwalkers nsteps wgsize '
                                  'gpus profile')
    addopt(parser, 'Sequence Options', 'seedseq seqs seqs_large')
    addopt(parser, 'Newton Step Options', 'bimarg mcsteps newtonsteps '
                                          'newton_delta fracNeff '
                                          'damping reg distribute_jstep gamma '
                                          'preopt reseed seedmsa')
    addopt(parser, 'Sampling Options', 'equiltime min_equil '
                                       'trackequil tempering nswaps_temp ')
    addopt(parser, 'Potts Model Options', 'alpha couplings L')
    addopt(parser, None, 'init_model outdir rngseed config '
                         'finish')

    args = parser.parse_args(args)
    args.measurefperror = False

    print_node_startup(log)

    log("")
    log("Command line arguments:")
    log(" ".join(cmd_quote(a) for a in orig_args))
    log("")

    if MPI:
        log("MPI detected using {} processes".format(mpi_comm.Get_size()))
        log("")

    log("Initialization")
    log("===============")

    if args.finish:
        # search for last completed run (contains a perturbedJ file)
        rundirs = glob.glob(os.path.join(args.finish, 'run_*'))
        rundirs.sort()
        rundir = None
        for fn in reversed(rundirs):
            if os.path.isfile(os.path.join(fn, 'perturbedJ.npy')):
                rundir = fn
                break
        if rundir is None:
            raise Exception("Did not find any runs in {}".format(args.finish))
        log("Continuing from {}".format(rundir))
        args.init_model = rundir
        log("")
        # restore the arguments of the interrupted run
        with open(os.path.join(args.finish, 'config.json'), 'r') as f:
            args.__dict__ = json.load(f)
        # resume after the last finished run number (trailing digits of dir)
        startrun = int(re.search(r'([0-9]+)$', rundir).group(1)) + 1
    else:
        startrun = 0

    mkdir_p(args.outdir)
    with open(os.path.join(args.outdir, 'command.txt'), 'w') as f:
        f.write(" ".join(cmd_quote(a) for a in orig_args))

    # collect all detected parameters in "p"
    p = attrdict({'outdir': args.outdir})

    setup_seed(args, p, log)

    p.update(process_newton_args(args, log))
    if p.bimarg is not None:
        p['L'], p['q'] = getLq(p.bimarg)

    p.update(process_potts_args(args, p.L, p.q, p.bimarg, log))
    L, q, alpha = p.L, p.q, p.alpha

    p.update(process_sample_args(args, log))
    gpup = process_GPU_args(args, L, q, p.outdir, log)
    p.update(gpup)
    gpus = setup_GPUs(p, log)
    gpus.initMCMC(p.nsteps)
    gpus.initJstep()
    # first gpu/node may need to store all collected seqs
    if p.distribute_jstep == 'head_gpu':
        gpus.head_gpu.initLargeBufs(gpus.nwalkers)
    elif p.distribute_jstep == 'head_node':
        if not MPI:
            raise Exception('"head_node" option only makes sense when '
                            'using MPI')
        nlrg = divideWalkers(gpus.nwalkers, gpus.head_node.ngpus, log, p.wgsize)
        gpus.head_node.initLargeBufs(nlrg)
    else:  # all
        pass
    log("")

    # figure out how many sequences we need to initialize
    needed_seqs = None
    use_seed = p.reseed in ['single_best', 'single_random']
    if p.preopt or (p.reseed == 'none'):
        needed_seqs = gpus.nseq['main']
    p.update(process_sequence_args(args, L, alpha, p.bimarg, log,
                                   nseqs=needed_seqs, needseed=use_seed))
    if p.reseed == 'msa':
        seedseqs = loadSequenceFile(args.seedmsa, alpha, log)
        seedseqs = repeatseqs(seedseqs, gpus.nseq['main'])
        p['seedmsa'] = np.split(seedseqs, gpus.ngpus)

    # initialize main buffers with any given sequences
    if p.preopt or p.reseed == 'none':
        if p.seqs is None:
            raise Exception("Need to provide seqs if not using seedseq")
        log("")
        log("Initializing main seq buf with loaded seqs.")
        gpus.setSeqs('main', p.seqs, log)
    elif use_seed and p.seedseq is None:
        raise Exception("Must provide seedseq if using reseed=single_*")

    log("")
    log("Computation Overview")
    log("====================")
    log("Running {} Newton-MCMC rounds".format(p.mcmcsteps))
    if p.equiltime == 'auto':
        log(("In each round, running {} MC walkers until equilibrated, with a "
             "minimum of {} equilibration loops").format(
             p.nwalkers, p.min_equil))
    else:
        log(("In each round, running {} MC walkers for {} equilibration loops "
             "with {} MC steps per loop (Each walker equilibrated a total of "
             "{} MC steps, or {:.1f} steps per position)."
             ).format(p.nwalkers, p.equiltime, p.nsteps, p.nsteps*p.equiltime,
                      p.nsteps*p.equiltime/p.L))

    describe_tempering(args, p, log)

    # effective sample size: with tempering, only the low-B group counts
    N = p.nwalkers
    if p.tempering:
        B0 = p.tempering[0]
        N = np.sum(p.tempering == B0)

    # expected statistical floor of the fit given N samples
    f = p.bimarg
    expected_SSR = np.sum(f*(1-f))/N
    absexp = np.sqrt(2/np.pi)*np.sqrt(f*(1-f)/N)/f
    expected_Ferr = np.mean(absexp[f>0.01])
    log("\nEstimated lowest achievable statistical error for this nwalkers and "
        "bimarg is:\nMIN: SSR = {:.4f} Ferr = {:.3f}".format(expected_SSR,
        expected_Ferr))
    log("(Statistical error only. Modeling biases and perturbation procedure "
        "may cause additional error)")
    log("")

    log("")
    log("MCMC Run")
    log("========")

    # newton-step batch-size tuning state used by NewtonSteps
    p['max_ns'] = 2048
    p['peak_ns'] = 256
    p['cur_ns'] = 256
    NewtonSteps.newtonMCMC(p, gpus, startrun, log)
def getEnergies(orig_args, args, log):
    """Compute the Potts energy of each sequence in a file and save to .npy.

    Parses its own subcommand options from ``args`` (couplings, alphabet,
    sequence file, GPU options), loads the sequences, evaluates their
    energies on the GPU, and writes the result with np.save to ``args.out``.

    orig_args : full original argv (unused here; kept for a uniform
                subcommand signature)
    args      : argument list for this subcommand
    log       : logging callable
    """
    descr = ('Compute Potts Energy of a set of sequences')
    parser = argparse.ArgumentParser(prog=progname + ' getEnergies',
                                     description=descr)
    add = parser.add_argument
    add('out', default='output', help='Output File')
    addopt(parser, 'GPU Options', 'wgsize gpus profile')
    addopt(parser, 'Potts Model Options', 'alpha couplings')
    addopt(parser, 'Sequence Options', 'seqs')
    addopt(parser, None, 'outdir')

    #genenergies uses a subset of the full inverse ising parameters,
    #so use custom set of params here
    args = parser.parse_args(args)
    args.measurefperror = False

    requireargs(args, 'couplings alpha seqs')

    log("Initialization")
    log("===============")
    log("")

    p = attrdict({'outdir': args.outdir})
    mkdir_p(args.outdir)
    # process_potts_args infers L and q from the couplings/alphabet
    p.update(process_potts_args(args, None, None, None, log))
    L, q, alpha = p.L, p.q, p.alpha

    log("Sequence Setup")
    log("--------------")
    seqs = loadSequenceFile(args.seqs, alpha, log)
    if seqs is None:
        raise Exception("seqs must be supplied")
    log("")

    # one walker per sequence; single MC step (no sampling is done here)
    args.nwalkers = len(seqs)
    args.nsteps = 1
    args.nlargebuf = 1
    gpup = process_GPU_args(args, L, q, p.outdir, log)
    p.update(gpup)
    gpus = setup_GPUs(p, log)
    gpus.setSeqs('main', seqs, log)

    log("")
    log("Computing Energies")
    log("==================")

    gpus.setBuf('J', p.couplings)
    gpus.calcEnergies('main')
    es = gpus.collect('E main')

    log("Saving results to file '{}'".format(args.out))
    np.save(args.out, es)
#def getBimarg(orig_args, args, log):
# descr = ('Compute bimarg of a set of sequences')
# parser = argparse.ArgumentParser(prog=progname + ' getBimarg',
# description=descr)
# add = parser.add_argument
# add('out', default='output', help='Output File')
# addopt(parser, 'GPU Options', 'wgsize gpus profile')
# addopt(parser, 'Potts Model Options', 'alpha')
# addopt(parser, 'Sequence Options', 'seqs')
# addopt(parser, None, 'outdir')
# args = parser.parse_args(args)
# args.measurefperror = False
# requireargs(args, 'alpha seqs')
# log("Initialization")
# log("===============")
# log("")
# p = attrdict({'outdir': args.outdir})
# mkdir_p(args.outdir)
# alpha = args.alpha.strip()
# p['alpha'] = alpha
# q = len(alpha)
# p['q'] = q
# log("Sequence Setup")
# log("--------------")
# seqs = loadSequenceFile(args.seqs, alpha, log)
# L = seqs.shape[1]
# p['L'] = L
# if seqs is None:
# raise Exception("seqs must be supplied")
# log("")
# args.nwalkers = len(seqs)
# args.nsteps = 1
# args.nlargebuf = 1
# gpup, cldat, gdevs = process_GPU_args(args, L, q, p.outdir, log)
# p.update(gpup)
# gpuwalkers = divideWalkers(p.nwalkers, len(gdevs), log, p.wgsize)
# gpus = [initGPU(n, cldat, dev, nwalk, p, log)
# for n,(dev, nwalk) in enumerate(zip(gdevs, gpuwalkers))]
# gpus.setSeqs('main', seqs, log)
# log("")
# log("Computing Bimarg")
# log("==================")
# for gpu in gpus:
# gpu.calcBicounts('main')
# gpu.bicounts_to_bimarg(gpu.nseq['main'])
# bbb = readGPUbufs(['bi'], gpus)[0]
# merge_device_bimarg(gpus)
# bimarg = gpus[0].getBuf('bi').read()
# bicounts = readGPUbufs(['bicount'], gpus)[0]
# log("Saving results to file '{}'".format(args.out))
# for n,b in enumerate(bicounts):
# np.save(os.path.join(p.outdir, 'bicount-{}'.format(n)), b)
# for n,b in enumerate(bbb):
# np.save(os.path.join(p.outdir, 'bimarg-{}'.format(n)), b)
# np.save(args.out, bimarg)
def MCMCbenchmark(orig_args, args, log):
    """Benchmark raw MCMC kernel throughput on the GPU.

    Sets up a model and starting sequences like a normal run, then times
    ``--nloop`` back-to-back MCMC kernel calls and reports MC steps/second.

    orig_args : full original argv (unused; uniform subcommand signature)
    args      : argument list for this subcommand
    log       : logging callable
    """
    descr = ('Benchmark MCMC generation on the GPU')
    parser = argparse.ArgumentParser(prog=progname + ' benchmark',
                                     description=descr)
    add = parser.add_argument
    add('--nloop', type=np.uint32, required=True,
        help="Number of kernel calls to benchmark")
    addopt(parser, 'GPU options', 'nwalkers nsteps wgsize '
                                  'gpus profile')
    addopt(parser, 'Sequence Options', 'seedseq seqs')
    addopt(parser, 'Potts Model Options', 'alpha couplings L')
    addopt(parser, None, 'init_model outdir rngseed')

    args = parser.parse_args(args)
    nloop = args.nloop
    args.measurefperror = False

    print_node_startup(log)

    log("Initialization")
    log("===============")
    log("")

    p = attrdict({'outdir': args.outdir})
    mkdir_p(args.outdir)
    setup_seed(args, p, log)
    p.update(process_potts_args(args, p.L, p.q, None, log))
    L, q, alpha = p.L, p.q, p.alpha
    #args.nlargebuf = 1
    # NOTE(review): setup_seed is called twice (also a few lines above);
    # presumably redundant — confirm before removing.
    setup_seed(args, p, log)

    gpup = process_GPU_args(args, L, q, p.outdir, log)
    p.update(gpup)
    gpus = setup_GPUs(p, log)
    gpus.initMCMC(p.nsteps)

    # figure out how many sequences we need to initialize
    needed_seqs = None
    use_seed = False
    if args.seqs is not None:
        needed_seqs = gpus.nseq['main']
    elif args.seedseq is not None:
        use_seed = True
    else:
        raise Exception("'seqs' or 'seedseq' option required")

    # NOTE(review): p.bimarg and p.reseed are not set by this subcommand's
    # options; this relies on the attrdict returning None/absent for them —
    # verify against the attrdict implementation.
    p.update(process_sequence_args(args, L, alpha, p.bimarg, log,
                                  nseqs=needed_seqs, needseed=use_seed))
    if p.reseed == 'msa':
        seedseqs = loadSequenceFile(args.seedmsa, alpha, log)
        seedseqs = repeatseqs(seedseqs, gpus.nseq['main'])
        p['seedmsa'] = np.split(seedseqs, gpus.ngpus)

    # initialize main buffers with any given sequences
    if use_seed:
        if p.seedseq is None:
            raise Exception("Must provide seedseq if using reseed=single_*")
        gpus.fillSeqs(p.seedseq)
    else:
        if p.seqs is None:
            raise Exception("Need to provide seqs if not using seedseq")
        gpus.setSeqs('main', p.seqs, log)

    log("")

    log("Benchmark")
    log("=========")
    log("")
    log("Benchmarking MCMC for {} loops, {} MC steps per loop".format(
        nloop, p.nsteps))

    import time

    def runMCMC():
        # queue nloop kernel calls, then block until the device finishes
        for i in range(nloop):
            gpus.runMCMC()
        gpus.wait()

    #initialize
    gpus.setBuf('J', p.couplings)

    #warmup
    log("Warmup run...")
    runMCMC()

    #timed run
    log("Timed run...")
    start = time.perf_counter()
    runMCMC()
    end = time.perf_counter()

    log("Elapsed time: ", end - start)
    # float64 to avoid overflow of the uint32 step product
    totsteps = p.nwalkers*nloop*np.float64(p.nsteps)
    steps_per_second = totsteps/(end-start)
    log("MC steps computed: {}".format(totsteps))
    log("MC steps per second: {:g}".format(steps_per_second))

    ## quick sanity check as a bonus
    #gpus.calcEnergies('main')
    #es = gpus.collect('E main')
    #log("\nConsistency check: <E> = ", np.mean(es), np.std(es))
def equilibrate(orig_args, args, log):
    """Run one round of MCMC sequence generation on the GPU and save results.

    Sets up the Potts model, GPUs and starting sequences, equilibrates the
    walkers (optionally with parallel tempering), then writes bicounts,
    bimarg, energies and the final sequences to the output directory.

    orig_args : full original argv (echoed to the log)
    args      : argument list for this subcommand
    log       : logging callable
    """
    descr = ('Run a round of MCMC generation on the GPU')
    parser = argparse.ArgumentParser(prog=progname + ' mcmc',
                                     description=descr)
    add = parser.add_argument
    addopt(parser, 'GPU options', 'nwalkers nsteps wgsize '
                                  'gpus profile')
    addopt(parser, 'Sequence Options', 'seedseq seqs seqbimarg')
    addopt(parser, 'Sampling Options', 'equiltime min_equil '
                                       'trackequil tempering nswaps_temp')
    addopt(parser, 'Potts Model Options', 'alpha couplings L')
    addopt(parser, None, 'init_model outdir rngseed')

    args = parser.parse_args(args)
    args.measurefperror = False

    log("")
    log("Command line arguments:")
    log(" ".join(cmd_quote(a) for a in orig_args))
    log("")

    log("Initialization")
    log("===============")

    p = attrdict({'outdir': args.outdir})
    mkdir_p(args.outdir)
    setup_seed(args, p, log)
    p.update(process_potts_args(args, None, None, None, log))
    L, q, alpha = p.L, p.q, p.alpha

    p.update(process_sample_args(args, log))
    if p.equiltime == 'auto':
        rngPeriod = 0
    else:
        rngPeriod = p.equiltime

    gpup = process_GPU_args(args, L, q, p.outdir, log)
    p.update(gpup)
    gpus = setup_GPUs(p, log)
    gpus.initMCMC(p.nsteps)

    # decide how walkers are initialized: from sequences or a seed sequence
    nseqs = None
    needseed = False
    if args.seqs is not None:
        nseqs = gpus.nseq['main']
    # NOTE(review): when seqs are given and seedseq is not, the else branch
    # below overwrites nseqs with p.nwalkers; an `elif` chain (as used in
    # MCMCbenchmark) may have been intended — preserved as-is.
    if args.seedseq is not None:
        needseed = True
    else:
        nseqs = p.nwalkers
    p.update(process_sequence_args(args, L, alpha, None, log, nseqs=nseqs,
                                  needseed=needseed))

    log("")
    log("Computation Overview")
    log("====================")
    if p.equiltime == 'auto':
        log(("In each round, running {} MC walkers until equilibrated, with a "
             "minimum of {} equilibration loops").format(
             p.nwalkers, p.min_equil))
    else:
        log(("Running {} MC walkers for {} equilibration loops "
             "with {} MC steps per loop (Each walker equilibrated a total of "
             "{} MC steps, or {:.1f} steps per position)."
             ).format(p.nwalkers, p.equiltime, p.nsteps, p.nsteps*p.equiltime,
                      p.nsteps*p.equiltime/p.L))

    describe_tempering(args, p, log)

    # set up gpu buffers
    if needseed:
        gpus.fillSeqs(p.seedseq)
    else:
        gpus.setSeqs('main', p.seqs, log)

    gpus.setBuf('J', p.couplings)

    log("")
    log("Equilibrating")
    log("====================")

    MCMC_func = NewtonSteps.runMCMC

    # set up tempering if needed
    if p.tempering is not None:
        MCMC_func = NewtonSteps.runMCMC_tempered
        B0 = np.max(p.tempering)

        if p.nwalkers % len(p.tempering) != 0:
            raise Exception("# of temperatures must evenly divide # walkers")
        # BUGFIX: use integer floor division for the repeat count. In
        # python3, `/` yields a float, which np.full rejects as a shape in
        # modern numpy. Divisibility was just checked, so `//` is exact.
        Bs = np.concatenate([full(p.nwalkers//len(p.tempering), b, dtype='f4')
                             for b in p.tempering])
        Bs = split(Bs, len(gpus))
        for B, gpu in zip(Bs, gpus):
            gpu.setBuf('Bs', B)
            gpu.markSeqs(B == B0)

    (bimarg_model,
     bicount,
     sampledenergies,
     e_rho,
     ptinfo,
     equilsteps) = MCMC_func(gpus, p.couplings, 'gen', p, log)

    seqs = gpus.collect('seq main')

    outdir = p.outdir
    np.savetxt(os.path.join(outdir, 'bicounts'), bicount, fmt='%d')
    np.save(os.path.join(outdir, 'bimarg'), bimarg_model)
    np.save(os.path.join(outdir, 'energies'), sampledenergies)
    writeSeqs(os.path.join(outdir, 'seqs'), seqs, alpha)

    if p.tempering is not None:
        e, b = readGPUbufs(['E main', 'Bs'], gpus)
        np.save(os.path.join(outdir, 'walker_Bs'), np.concatenate(b))
        np.save(os.path.join(outdir, 'walker_Es'), np.concatenate(e))
        log("Final PT swap rate: {}".format(ptinfo[1]))

    log("Mean energy:", np.mean(sampledenergies))

    log("Done!")
def subseqFreq(orig_args, args, log):
    """Compute relative log-frequencies of subsequences at fixed positions.

    For each subsequence, the fixed positions of a large background sample
    are overwritten with that subsequence, and the resulting energy change
    is folded into a logsumexp estimate of the subsequence's log frequency.
    Result is saved with np.save to ``args.out``.

    NOTE(review): this subcommand uses an older multi-GPU API — it calls
    process_GPU_args with 6 arguments and unpacks 3 return values, while
    the process_GPU_args defined in this file takes 5 and returns one
    attrdict. It likely predates an API change; verify before use.
    """
    descr = ('Compute relative frequency of subsequences at fixed positions')
    parser = argparse.ArgumentParser(prog=progname + ' subseqFreq',
                                     description=descr)
    add = parser.add_argument
    add('fixpos', help="comma separated list of fixed positions")
    add('out', default='output', help='Output File')
    addopt(parser, 'GPU options', 'wgsize gpus')
    addopt(parser, 'Potts Model Options', 'alpha couplings L')
    addopt(parser, None, 'outdir')
    group = parser.add_argument_group('Sequence Options')
    add = group.add_argument
    add('backgroundseqs', help="large sample of equilibrated sequences")
    add('subseqs', help="sequences from which to compute subseq freqs")

    args = parser.parse_args(args)
    args.measurefperror = False

    log("Initialization")
    log("===============")
    log("")

    p = attrdict({'outdir': args.outdir})
    args.trackequil = 0
    mkdir_p(args.outdir)
    p.update(process_potts_args(args, p.L, p.q, None, log))
    L, q, alpha = p.L, p.q, p.alpha

    # try to load sequence files
    bseqs = loadSequenceFile(args.backgroundseqs, alpha, log)
    sseqs = loadSequenceFile(args.subseqs, alpha, log)

    args.nwalkers = 1
    gpup, cldat, gdevs = process_GPU_args(args, L, q, p.outdir, 1, log)
    p.update(gpup)
    p.nsteps = 1
    gpuwalkers = divideWalkers(len(bseqs), len(gdevs), log, p.wgsize)
    gpus = [initGPU(n, cldat, dev, len(sseqs), nwalk, p, log)
            for n, (dev, nwalk) in enumerate(zip(gdevs, gpuwalkers))]

    #fix positions
    fixedpos = np.array([int(x) for x in args.fixpos.split(',')])
    fixedmarks = np.zeros(L, dtype='u1')
    fixedmarks[fixedpos] = 1

    #load buffers
    # split the background sample across GPUs at the walker boundaries
    gpubseqs = split(bseqs, np.cumsum(gpuwalkers)[:-1])
    for gpu, bs in zip(gpus, gpubseqs):
        gpu.setBuf('seq main', sseqs)
        gpu.setBuf('seq large', bs)
        gpu.markPos(fixedmarks)
        gpu.setBuf('J', p.couplings)

    log("")
    log("Subsequence Frequency Calculation")
    log("=================================")
    log("")

    # reference energies of the unmodified background sample
    for gpu in gpus:
        gpu.calcEnergies('large')
    origEs = np.concatenate(readGPUbufs(['E large'], gpus)[0])

    log("Getting substituted energies...")
    subseqE = []
    logf = np.zeros(len(sseqs))
    for n in range(len(sseqs)):
        # replaced fixed positions by subsequence, and calc energies
        for gpu in gpus:
            gpu.copySubseq(n)
            gpu.calcEnergies('large')
        energies = np.concatenate(readGPUbufs(['E large'], gpus)[0])
        logf[n] = logsumexp(origEs - energies)

    #save result
    log("Saving result (log frequency) to file {}".format(args.out))
    np.save(args.out, logf)
def nestedZ(args, log):
    """Placeholder: compute the partition function Z via nested sampling.

    Planned approach: a parallel nested-sampling algorithm as described in
    "Exploring the energy landscapes of protein folding simulations with
    Bayesian computation"; roughly K = 1024 live points with P = 256
    parallel replacements, or better.

    See also:
      * "Nested sampling for Potts models" (NIPS 2005), which treats
        2-d finite-range Potts models specifically.
      * "Nested sampling, statistical physics and the Potts model" (2017).
    """
    raise Exception("Not implemented yet")
def ExactZS(args, log):
    """Placeholder: exactly compute Z and S for small systems by enumeration.

    The plan is a GPU kernel similar to the energy-calculation kernel, but
    generating every sequence on the fly instead of loading from memory,
    accumulating Z = sum(exp(-E)) and S = -sum(p*log(p)).

    For q=8 this is limited to roughly L=16. One scheme: precompute partial
    energies for positions 1-10 into a buffer (~4G), then iterate positions
    11-16 on the GPU, one work unit per subsequence, looping over the
    precomputed part. Alternatively work in chunks of 8 positions, storing
    partial energies recursively (64M per row, 8*L Mb total), with the
    kernel doing more than (8**8)**(L/8) energy loops.
    """
    raise Exception("Not implemented yet")
def testing(orig_args, args, log):
    """Developer scratch subcommand for exercising GPU Newton-step kernels.

    Loads a model and sequences, runs the bimarg pipeline and the reg_ddE
    kernel, and prints intermediate buffers for manual inspection. Not part
    of the normal workflow.

    NOTE(review): this function contains known-broken debug code — the
    inner helper R() references an undefined name `nPair` and has no
    return statement — preserved verbatim.
    """
    descr = ('Compute Potts Energy of a set of sequences')
    parser = argparse.ArgumentParser(prog=progname + ' getEnergies',
                                     description=descr)
    add = parser.add_argument
    #add('out', default='output', help='Output File')
    addopt(parser, 'GPU Options', 'wgsize gpus profile')
    addopt(parser, 'Potts Model Options', 'alpha couplings')
    addopt(parser, 'Newton Step Options', 'bimarg ')
    addopt(parser, 'Sequence Options', 'seqs')
    addopt(parser, None, 'outdir')

    #genenergies uses a subset of the full inverse ising parameters,
    #so use custom set of params here
    args = parser.parse_args(args)
    args.measurefperror = False

    requireargs(args, 'couplings alpha seqs')

    log("Initialization")
    log("===============")
    log("")

    p = attrdict({'outdir': args.outdir})
    mkdir_p(args.outdir)
    p.update(process_potts_args(args, None, None, None, log))
    L, q, alpha = p.L, p.q, p.alpha

    log("Sequence Setup")
    log("--------------")
    seqs = loadSequenceFile(args.seqs, alpha, log)
    if seqs is None:
        raise Exception("seqs must be supplied")
    log("")

    args.nwalkers = len(seqs)
    args.nsteps = 1
    args.nlargebuf = 1
    gpup = process_GPU_args(args, L, q, p.outdir, log)
    p.update(gpup)
    gpus = setup_GPUs(p, log)
    gpus.initMCMC(63)
    gpus.initJstep()
    p['bimarg'] = np.load(args.bimarg)

    log("")
    log("Setup:")

    J = p.couplings
    gpus.setSeqs('main', seqs, log)
    gpus.setBuf('J', J)
    gpus.fillBuf('dJ', 0)
    gpus.setBuf('bi target', p.bimarg)
    # compute the model bimarg from the loaded sequences
    gpus.calcBicounts('main')
    gpus.bicounts_to_bimarg('main')
    gpus.merge_bimarg()
    bi = gpus.head_gpu.readBufs('bi')[0]

    # ad-hoc Newton-step hyperparameters for this experiment
    gamma = 0.004
    pc = 0.2
    lJ = 0.1

    import utils.changeGauge as changeGauge

    # probe reg_ddE with a synthetic coupling matrix (all zero except col 1)
    JJ = np.zeros(J.shape, dtype='f4')
    JJ[:,1] = 3
    gpus.setBuf('J', JJ)
    gpus.reg_ddE(gamma, pc, lJ)
    dJ = gpus.head_gpu.getBuf('dJ')[0].read()
    print(JJ)
    print(dJ)
    print(dJ[0].reshape((q,q)))

    def R(J):
        # CPU reference for the ddE regularizer sum.
        # NOTE(review): `nPair` is undefined here and there is no return —
        # this helper is broken debug code.
        J = J.reshape((nPair, q, q))
        Rab = np.zeros(J.shape)
        b,a = np.meshgrid(np.arange(q), np.arange(q))
        for g in range(1,q):
            jr = J[...,a,b] - J[...,(a+g)%q,b]
            for d in range(1,q):
                jc = -J[...,a,(b+d)%q] + J[...,(a+g)%q,(b+d)%q]
                Rab += jr + jc
        R = np.sum(Rab, axis=(-1,-2))

    print(R(JJ))

    ##gpus.updateJ_l1z(gamma, pc, lam, Jbuf='J')
    ##J0cpu = changeGauge.zeroGauge(None, p.couplings)[1]
    ##np.save(os.path.join(p.outdir, 'J0gpu'), J0gpu)
    ##np.save(os.path.join(p.outdir, 'J0cpu'), J0cpu)
    ##log("Coupling results:")
    ##log("GPU:", printsome(J0gpu))
    ##log("CPU:", printsome(J0cpu))

    #gpus.updateJ_l1z(gamma, pc, lJ, Jbuf='J')
    #Jgpu = gpus.head_gpu.getBuf('J')[0].read()

    #log("bim:", printsome(bi))
    #log("bimt:", printsome(p.bimarg))
    #log("df:", printsome(p.bimarg - bi))

    #Jcpu = J - gamma*(p.bimarg - bi)/(bi + pc)
    #log("org:", printsome(J, prec=6))
    #log("unr:", printsome(Jcpu, prec=6))
    #J0 = changeGauge.zeroGauge(None, Jcpu)[1]
    #log("J0: ", printsome(J0, prec=6))

    #R = -lJ*np.sign(J0)*gamma/(bi + pc)
    #cond = np.sign(J0) != np.sign(J0 + R)
    #cont = np.ones(J0.shape, dtype=bool)
    #Jcpu[cond] = Jcpu[cond] - J0[cond]
    #Jcpu[~cond] = Jcpu[~cond] + R[~cond]

    #log("GPU:", printsome(Jgpu, prec=6))
    #log("CPU:", printsome(Jcpu, prec=6))
    #J0p = changeGauge.zeroGauge(None, Jcpu)[1]
    #log("J0: ", printsome(J0p, prec=6))

    #np.save(os.path.join(p.outdir, 'J'), J)
    #np.save(os.path.join(p.outdir, 'Jgpu'), Jgpu)
    #np.save(os.path.join(p.outdir, 'Jcpu'), Jcpu)

    ##log("Computing Energies")
    ##log("==================")
    ##t1 = time.time()
    ##for i in range(1000):
    ##    for gpu in gpus:
    ##        gpu.calcEnergies('main')
    ##es = np.concatenate(readGPUbufs(['E main'], gpus)[0])
    ##print("Time", time.time() - t1)

    ##log(printsome(es))

    ##log("Saving results to file '{}'".format(args.out))
    ##np.save(args.out, es)
################################################################################
def process_GPU_args(args, L, q, outdir, log):
    """Collect GPU-related command-line options into an attrdict.

    Copies nsteps/wgsize/nwalkers/gpus/profile/measurefperror off the
    parsed args, attaches L, q and outdir, adjusts the workgroup size via
    wgsize_heuristic, logs the configuration, and returns the attrdict.
    """
    log("GPU setup")
    log("---------")

    param = attrdict({'nsteps': args.nsteps,
                      'wgsize': args.wgsize,
                      'nwalkers': args.nwalkers,
                      'gpuspec': args.gpus,
                      'profile': args.profile,
                      'fperror': args.measurefperror})

    p = attrdict(param.copy())
    p.update({'L': L, 'q': q, 'outdir': outdir})

    # wgsize may be adjusted to suit q (e.g. rounded to a usable size)
    p['wgsize'] = wgsize_heuristic(p.q, p.wgsize)

    log("Total GPU walkers: {}".format(p.nwalkers))
    log("Work Group Size: {}".format(p.wgsize))
    log("{} MC steps per MCMC kernel call".format(p.nsteps))
    if p.profile:
        log("Profiling Enabled")
    return p
def process_newton_args(args, log):
    """Collect Newton-solver options and the target bimarg into an attrdict.

    Loads the target bivariate marginals from ``args.bimarg`` (must be
    little-endian float32, strictly inside (0, 1]), and parses the optional
    regularization specifier ``args.reg`` of the form 'type[:params]' with
    type one of l2z, l1z, X, ddE.

    Raises Exception on malformed bimarg or reg specifiers.
    """
    log("Newton Solver Setup")
    log("-------------------")
    mcmcsteps = args.mcsteps
    log("Running {} Newton-MCMC rounds".format(mcmcsteps))

    param = {'mcmcsteps': args.mcsteps,
             'newtonSteps': args.newtonsteps,
             'newton_delta': args.newton_delta,
             'fracNeff': args.fracNeff,
             'gamma0': args.gamma,
             'pcdamping': args.damping,
             'reseed': args.reseed,
             'preopt': args.preopt,
             'distribute_jstep': args.distribute_jstep}
    p = attrdict(param)

    log("Updating J locally with gamma={}, and pc-damping {}".format(
        str(p.gamma0), str(p.pcdamping)))
    log("Running {} Newton update steps per round.".format(p.newtonSteps))
    log("Using {}-GPU mode for Newton-step calculations.".format(
        p.distribute_jstep))

    log("Reading target marginals from file {}".format(args.bimarg))
    bimarg = np.load(args.bimarg)
    if bimarg.dtype != np.dtype('<f4'):
        # could convert, but this helps warn that something may be wrong
        raise Exception("Bimarg must be in 'f4' format")
    if np.any((bimarg <= 0) | (bimarg > 1)):
        raise Exception("All bimarg must be 0 < f < 1")
    log("Target Marginals: " + printsome(bimarg) + "...")
    p['bimarg'] = bimarg

    if args.reg is not None:
        rtype, dummy, rarg = args.reg.partition(':')
        rtypes = ['l2z', 'l1z', 'X', 'ddE']
        if rtype not in rtypes:
            raise Exception("reg must be one of {}".format(str(rtypes)))
        p['reg'] = rtype
        rargs = rarg.split(',')
        if rtype == 'X':
            log("Regularizing with X from file {}".format(rargs[0]))
            p['regarg'] = np.load(rargs[0])
            if p['regarg'].shape != bimarg.shape:
                raise Exception("X in wrong format")
        elif rtype == 'ddE':
            lam = float(rargs[0])
            log("Regularizing using ddE with lambda = {}".format(lam))
            p['regarg'] = (lam,)
        elif rtype == 'l2z' or rtype == 'l1z':
            # BUGFIX: the original logged an undefined name `lh` (NameError)
            # which the bare `except` converted into the usage Exception for
            # every input, and the usage message itself mixed a named {r}
            # field with positional .format() args (KeyError). Only lJ is
            # consumed downstream (regarg=(lJ,)), so parse and report that.
            try:
                lJ = float(rargs[0])
            except (ValueError, IndexError):
                raise Exception("{r} specifier must be of form '{r}:lJ', eg "
                                "'{r}:0.01'. Got '{}'".format(args.reg,
                                                              r=rtype))
            log("Regularizing using {} norm with lambda_J = {}".format(
                rtype, lJ))
            p['regarg'] = (lJ,)

    log("")
    return p
def updateLq(L, q, newL, newq, name):
    """Reconcile (L, q) with values newly derived from source `name`.

    A new value fills in a previously-unknown (None) slot; if both the old
    and new values are set they must agree, otherwise an Exception naming
    the conflicting source is raised. Returns the merged (L, q).
    """
    if newL is not None and L is not None and L != newL:
        raise Exception("L from {} ({}) inconsitent with previous "
                        "value ({})".format(name, newL, L))
    if newq is not None and q is not None and q != newq:
        raise Exception("q from {} ({}) inconsitent with previous "
                        "value ({})".format(name, newq, q))
    L = L if newL is None else newL
    q = q if newq is None else newq
    return L, q
def process_potts_args(args, L, q, bimarg, log):
    """Determine the Potts model parameters (L, q, alphabet, couplings).

    Reconciles L and q from the command line, the alphabet length, any
    bimarg dimensions and the coupling dimensions, loads or generates the
    couplings, and returns them in an attrdict.
    """
    log("Potts Model Setup")
    log("-----------------")

    # we try to infer L and q from any values given. The possible sources
    # * command line options -L and -q
    # * from bivariate_target dimensions
    # * from coupling dimensions

    alpha = args.alpha.strip()
    argL = args.L if hasattr(args, 'L') else None
    L, q = updateLq(argL, len(alpha), L, q, 'bimarg')

    # next try to get couplings (may determine L, q)
    couplings, L, q = getCouplings(args, L, q, bimarg, log)
    # we should have L and q by this point

    log("alphabet: {}".format(alpha))
    log("q {} L {}".format(q, L))
    log("Couplings: " + printsome(couplings) + "...")

    log("")
    return attrdict({'L': L, 'q': q, 'alpha': alpha,
                     'couplings': couplings})
def getCouplings(args, L, q, bimarg, log):
    """Obtain the coupling matrix from args, generating or loading as needed.

    Sources, in order of preference:
      * args.couplings == 'uniform'/'independent': generate analytically
        (uniform needs L; independent additionally needs bimarg)
      * args.couplings == filename: np.load it (must be '<f4')
      * args.init_model == directory: load <dir>/J.npy
    Returns (couplings, L, q) with L and q reconciled against the loaded
    coupling dimensions; raises Exception if no source is available.
    """
    couplings = None

    if args.couplings is None and args.init_model in ['uniform', 'independent']:
        args.couplings = args.init_model

    if args.couplings:
        #first try to generate couplings (requires L, q)
        if args.couplings in ['uniform', 'independent']:
            if L is None: # we are sure to have q
                raise Exception("Need L to generate couplings")
            if args.couplings == 'uniform':
                log("Setting Initial couplings to uniform frequencies")
                h = -np.log(1.0/q)
                J = np.zeros((L*(L-1)//2,q*q), dtype='<f4')
                couplings = fieldlessGaugeEven(h, J)[1]
            elif args.couplings == 'independent':
                log("Setting Initial couplings to independent model")
                if bimarg is None:
                    raise Exception("Need bivariate marginals to generate "
                                    "independent model couplings")
                # fields from the univariate marginals, zero couplings,
                # then moved to the fieldless gauge
                h = -np.log(unimarg(bimarg))
                J = np.zeros((L*(L-1)//2,q*q), dtype='<f4')
                couplings = fieldlessGaugeEven(h, J)[1]
        else: #otherwise load them from file
            log("Reading couplings from file {}".format(args.couplings))
            couplings = np.load(args.couplings)
            if couplings.dtype != np.dtype('<f4'):
                raise Exception("Couplings must be in 'f4' format")
    elif args.init_model and args.init_model not in ['uniform', 'independent']:
        # and otherwise try to load them from model directory
        fn = os.path.join(args.init_model, 'J.npy')
        if os.path.isfile(fn):
            log("Reading couplings from file {}".format(fn))
            couplings = np.load(fn)
            if couplings.dtype != np.dtype('<f4'):
                raise Exception("Couplings must be in 'f4' format")
        else:
            raise Exception("could not find file {}".format(fn))
    else:
        raise Exception("didn't get couplings or init_model")
    L2, q2 = getLq(couplings)
    L, q = updateLq(L, q, L2, q2, 'couplings')

    if couplings is None:
        raise Exception("Could not find couplings. Use either the "
                        "'couplings' or 'init_model' options.")

    return couplings, L, q
def repeatseqs(seqs, n):
    """Return exactly n rows built by repeating each row of seqs in place.

    Each row is duplicated ceil(n / nrows) times consecutively (row order
    s0,s0,...,s1,s1,...), then the result is truncated to n rows.
    """
    nreps = (n - 1)//seqs.shape[0] + 1  # ceil(n / nrows)
    return np.repeat(seqs, nreps, axis=0)[:n, :]
def process_sequence_args(args, L, alpha, bimarg, log,
                          nseqs=None, needseed=False):
    """Load or generate the starting sequences and/or a single seed sequence.

    When ``nseqs`` is given, obtains that many sequences from args.seqs
    (a file, or the generator names 'uniform'/'independent') or from
    args.init_model, repeating or truncating to exactly nseqs rows. When
    ``needseed`` is True, obtains one seed sequence from args.seedseq
    (generator name, literal sequence string, or filename) or from
    args.init_model. Returns attrdict with 'seedseq' and 'seqs' (either
    may be None).
    """
    log("Sequence Setup")
    log("--------------")

    if bimarg is None and (hasattr(args, 'seqbimarg') and
                           args.seqbimarg is not None):
        log("loading bimarg from {} for independent model sequence "
            "generation".format(args.seqbimarg))
        bimarg = np.load(args.seqbimarg)

    q = len(alpha)
    seedseq, seqs = None, None

    # try to load sequence files
    if nseqs is not None:
        if args.seqs in ['uniform', 'independent']:
            seqs = generateSequences(args.seqs, L, q, nseqs, bimarg, log)
            writeSeqs(os.path.join(args.outdir, 'initial_seqs'), seqs, alpha)
        elif args.seqs is not None:
            seqs = loadSequenceFile(args.seqs, alpha, log)
        elif args.init_model in ['uniform', 'independent']:
            seqs = generateSequences(args.init_model, L, q, nseqs, bimarg, log)
        elif args.init_model is not None:
            seqs = loadSequenceDir(args.init_model, '', alpha, log)

    if nseqs is not None and seqs is None:
        raise Exception("Did not find requested {} sequences".format(nseqs))

    # ROBUSTNESS: only resize when sequences were actually requested/loaded;
    # on the needseed-only path (nseqs is None) seqs is None here.
    if seqs is not None:
        n_loaded = seqs.shape[0]
        if nseqs > n_loaded:
            log("Repeating {} sequences to make {}".format(n_loaded, nseqs))
            seqs = repeatseqs(seqs, nseqs)
        elif nseqs < n_loaded:
            log("Truncating {} sequences to make {}".format( n_loaded, nseqs))
            seqs = seqs[:nseqs]

    # try to get seed seq
    if needseed:
        if args.seedseq in ['uniform', 'independent']:
            seedseq = generateSequences(args.seedseq, L, q, 1, bimarg, log)[0]
            seedseq_origin = args.seedseq
        elif args.seedseq is not None: # given string
            try:
                # BUGFIX: was `alpha.index.index(c)`, which raised
                # AttributeError for every input and silently forced the
                # file-loading fallback. A literal sequence string is now
                # decoded directly; only an out-of-alphabet character
                # (ValueError) falls back to treating it as a filename.
                seedseq = np.array([alpha.index(c) for c in args.seedseq],
                                   dtype='<u1')
                seedseq_origin = 'supplied'
            except ValueError:
                seedseq = loadseedseq(args.seedseq, args.alpha.strip(), log)
                seedseq_origin = 'from file'
        elif args.init_model in ['uniform', 'independent']:
            seedseq = generateSequences(args.init_model, L, q, 1, bimarg, log)[0]
            seedseq_origin = args.init_model
        elif args.init_model is not None:
            seedseq = loadseedseq(os.path.join(args.init_model, 'seedseq'),
                                  args.alpha.strip(), log)
            seedseq_origin = 'from file'

        log("Seed seq ({}): {}".format(seedseq_origin,
                                       "".join(alpha[x] for x in seedseq)))

    log("")
    return attrdict({'seedseq': seedseq,
                     'seqs': seqs})
def generateSequences(gentype, L, q, nseqs, bimarg, log):
    """Generate nseqs starting sequences of length L over a q-letter alphabet.

    gentype 'zero' or 'uniform' draws every position uniformly at random;
    'independent' samples each position from the site-wise (univariate)
    marginals derived from bimarg. Returns a (nseqs, L) '<u1' array, or
    raises Exception for an unknown gentype.
    """
    if gentype in ('zero', 'uniform'):
        log("Generating {} random sequences...".format(nseqs))
        return randint(0, q, size=(nseqs, L)).astype('<u1')

    if gentype == 'independent':
        log("Generating {} independent-model sequences...".format(nseqs))
        if bimarg is None:
            raise Exception("Bimarg must be provided to generate sequences")
        # per-site cumulative distributions, renormalized against fp error
        cumprob = np.cumsum(unimarg(bimarg), axis=1)
        cumprob = cumprob/(cumprob[:,-1][:,None])
        # inverse-CDF sample each column, then transpose to (nseqs, L)
        columns = [np.searchsorted(cp, rand(nseqs)) for cp in cumprob]
        return np.array(columns, dtype='<u1').T

    raise Exception("Unknown sequence generation mode '{}'".format(gentype))
def loadseedseq(fn, alpha, log):
    """Read a single seed sequence (first line of fn) and decode it.

    Each character is mapped to its index in the alphabet string `alpha`;
    returns a 1-d '<u1' numpy array.
    """
    log("Reading seedseq from file {}".format(fn))
    with open(fn) as f:
        line = f.readline().strip()
    return np.array([alpha.index(c) for c in line], dtype='<u1')
def loadSequenceFile(sfile, alpha, log):
    """Load an MSA from `sfile` using alphabet `alpha`; return a '<u1' array."""
    log("Loading sequences from file {}".format(sfile))
    loaded = loadSeqs(sfile, names=alpha)[0]
    msa = loaded.astype('<u1')
    log("Found {} sequences".format(msa.shape[0]))
    return msa
def loadSequenceDir(sdir, bufname, alpha, log):
    """Load the 'seqs' MSA file inside directory `sdir`; return a '<u1' array."""
    log("Loading {} sequences from dir {}".format(bufname, sdir))
    seq_path = os.path.join(sdir, 'seqs')
    msa = loadSeqs(seq_path, names=alpha)[0].astype('<u1')
    log("Found {} sequences".format(msa.shape[0]))
    return msa
def process_sample_args(args, log):
    """Collect MCMC sampling options (equilibration, tempering) into an attrdict.

    equiltime is either the string 'auto' or an int loop count; trackequil,
    when nonzero, must evenly divide a numeric equiltime. args.tempering may
    be a .npy filename or a comma-separated list of inverse temperatures.

    Raises Exception if trackequil does not divide equiltime.
    """
    p = attrdict({'equiltime': args.equiltime,
                  'min_equil': args.min_equil,
                  'trackequil': args.trackequil})

    if p['equiltime'] != 'auto':
        p['equiltime'] = int(p['equiltime'])

    if 'tempering' in args and args.tempering:
        try:
            Bs = np.load(args.tempering)
        # BUGFIX: was a bare `except:` that swallowed every error (even
        # KeyboardInterrupt). np.load raises OSError for a missing/invalid
        # file and ValueError for unreadable content; anything else should
        # propagate. Also dropped the redundant `[x for x in ...]` copy.
        except (OSError, ValueError):
            Bs = np.array(args.tempering.split(","), dtype='f4')
        p['tempering'] = Bs
        p['nswaps'] = args.nswaps_temp

    log("MCMC Sampling Setup")
    log("-------------------")

    if p.equiltime == 'auto':
        log('Using "auto" equilibration')
    else:
        log(("In each MCMC round, running {} GPU MCMC kernel calls"
             ).format(p.equiltime))
    if 'tempering' in p:
        log("Parallel tempering with inverse temperatures {}, "
            "swapping {} times per loop".format(args.tempering, p.nswaps))

    if p.equiltime != 'auto' and p.trackequil != 0:
        if p.equiltime % p.trackequil != 0:
            raise Exception("Error: trackequil must be a divisor of equiltime")
        log("Tracking equilibration every {} loops.".format(p.trackequil))

    log("")
    return p
################################################################################
class CLInfoAction(argparse.Action):
    """argparse action for --clinfo: print detected GPUs and exit.

    Takes no value (nargs=0) and suppresses both dest and default so it
    leaves no trace on the parsed namespace.
    """
    def __init__(self, option_strings, dest=argparse.SUPPRESS,
                 default=argparse.SUPPRESS, help=None):
        super(CLInfoAction, self).__init__(option_strings=option_strings,
            dest=dest, default=default, nargs=0, help=help)
    def __call__(self, parser, namespace, values, option_string=None):
        # list the available OpenCL devices, then terminate like --help does
        printGPUs(print)
        parser.exit()
def main(args):
    """Entry point: dispatch *args* to the requested computation.

    Handles ``--clinfo``, ``--mpi`` and ``-h``/``--help`` itself; any
    unrecognized arguments are forwarded to the selected action's own
    argument parser.
    """
    actions = {
        'infer': inverseIsing,
        'energies': getEnergies,
        #'getBimarg': getBimarg,
        'benchmark': MCMCbenchmark,
        'subseq': subseqFreq,
        'gen': equilibrate,
        'test': testing,
        #'nestedZ': nestedZ,
        #'measureFPerror': measureFPerror,
    }

    descr = 'Perform biophysical Potts Model calculations on the GPU'
    parser = argparse.ArgumentParser(description=descr, add_help=False)
    parser.add_argument('action', choices=actions.keys(), nargs='?',
                        default=None, help="Computation to run")
    parser.add_argument('--clinfo', action=CLInfoAction,
                        help="Display detected GPUs")
    parser.add_argument('--mpi', action='store_true', help="Enable MPI")
    parser.add_argument('-h', '--help', action='store_true',
                        help="show this help message and exit")

    known_args, remaining_args = parser.parse_known_args(args)

    if known_args.action is None:
        # No action chosen: show full help if requested, otherwise usage.
        if known_args.help:
            print(parser.format_help())
        else:
            print(parser.format_usage())
        return

    if known_args.help:
        # Forward the help request to the chosen action's sub-parser.
        remaining_args.append('-h')

    if known_args.mpi:
        setup_MPI()
        if mpi_rank != 0:
            # Non-root MPI ranks serve GPU work and never dispatch an action.
            worker_GPU_main()
            return

    actions[known_args.action](args, remaining_args, print)
if __name__ == '__main__':
    # Script entry point.  setup_exit_hook presumably registers cleanup
    # handlers for interpreter exit (likely via atexit) -- confirm in its
    # definition.  Program name is stripped before dispatch.
    setup_exit_hook(print)
    main(sys.argv[1:])
| [
"atexit.register",
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.sum",
"utils.printsome",
"mpmath.mpf",
"numpy.mean",
"numpy.arange",
"os.path.isfile",
"scipy.special.logsumexp",
"numpy.random.randint",
"mpi_manager.MPI_worker",
"numpy.float64",
"os.path.join",
"... | [((2096, 2131), 'os.path.join', 'os.path.join', (['scriptPath', '"""mcmc.cl"""'], {}), "(scriptPath, 'mcmc.cl')\n", (2108, 2131), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((2055, 2081), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2071, 2081), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((7667, 7687), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7681, 7687), True, 'import numpy as np\n'), ((8885, 8908), 'atexit.register', 'atexit.register', (['exiter'], {}), '(exiter)\n', (8900, 8908), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((9630, 9679), 'mcmcGPU.setup_GPU_context', 'setup_GPU_context', (['scriptPath', 'scriptfile', 'p', 'log'], {}), '(scriptPath, scriptfile, p, log)\n', (9647, 9679), False, 'from mcmcGPU import setup_GPU_context, initGPU, wgsize_heuristic, printGPUs\n'), ((9894, 9920), 'mpi_manager.MPI_worker', 'MPI_worker', (['mpi_rank', 'gpus'], {}), '(mpi_rank, gpus)\n', (9904, 9920), False, 'from mpi_manager import MPI_multinode_controller, MPI_GPU_node, MPI_worker\n'), ((10033, 10082), 'mcmcGPU.setup_GPU_context', 'setup_GPU_context', (['scriptPath', 'scriptfile', 'p', 'log'], {}), '(scriptPath, scriptfile, p, log)\n', (10050, 10082), False, 'from mcmcGPU import setup_GPU_context, initGPU, wgsize_heuristic, printGPUs\n'), ((10966, 10999), 'mpi_manager.MPI_multinode_controller', 'MPI_multinode_controller', (['workers'], {}), '(workers)\n', (10990, 10999), False, 'from mpi_manager import MPI_multinode_controller, MPI_GPU_node, MPI_worker\n'), ((11278, 11327), 'mcmcGPU.setup_GPU_context', 'setup_GPU_context', (['scriptPath', 'scriptfile', 'p', 'log'], {}), '(scriptPath, scriptfile, p, log)\n', (11295, 11327), False, 'from mcmcGPU import setup_GPU_context, initGPU, wgsize_heuristic, printGPUs\n'), ((11770, 11788), 'node_manager.GPU_node', 'GPU_node', 
(['headgpus'], {}), '(headgpus)\n', (11778, 11788), False, 'from node_manager import GPU_node\n'), ((12201, 12276), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': "(progname + ' inverseIsing')", 'description': 'descr'}), "(prog=progname + ' inverseIsing', description=descr)\n", (12224, 12276), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((17418, 17443), 'numpy.mean', 'np.mean', (['absexp[f > 0.01]'], {}), '(absexp[f > 0.01])\n', (17425, 17443), True, 'import numpy as np\n'), ((17941, 17987), 'NewtonSteps.newtonMCMC', 'NewtonSteps.newtonMCMC', (['p', 'gpus', 'startrun', 'log'], {}), '(p, gpus, startrun, log)\n', (17963, 17987), False, 'import NewtonSteps\n'), ((18100, 18174), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': "(progname + ' getEnergies')", 'description': 'descr'}), "(prog=progname + ' getEnergies', description=descr)\n", (18123, 18174), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((19609, 19630), 'numpy.save', 'np.save', (['args.out', 'es'], {}), '(args.out, es)\n', (19616, 19630), True, 'import numpy as np\n'), ((21867, 21939), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': "(progname + ' benchmark')", 'description': 'descr'}), "(prog=progname + ' benchmark', description=descr)\n", (21890, 21939), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((24482, 24501), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (24499, 24501), False, 'import time\n'), ((24526, 24545), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (24543, 24545), False, 'import time\n'), ((25073, 25140), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': "(progname + ' mcmc')", 'description': 'descr'}), "(prog=progname + ' mcmc', description=descr)\n", (25096, 25140), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, 
argparse\n'), ((29110, 29183), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': "(progname + ' subseqFreq')", 'description': 'descr'}), "(prog=progname + ' subseqFreq', description=descr)\n", (29133, 29183), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((30682, 30705), 'numpy.zeros', 'np.zeros', (['L'], {'dtype': '"""u1"""'}), "(L, dtype='u1')\n", (30690, 30705), True, 'import numpy as np\n'), ((31721, 31744), 'numpy.save', 'np.save', (['args.out', 'logf'], {}), '(args.out, logf)\n', (31728, 31744), True, 'import numpy as np\n'), ((33594, 33668), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': "(progname + ' getEnergies')", 'description': 'descr'}), "(prog=progname + ' getEnergies', description=descr)\n", (33617, 33668), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((34952, 34972), 'numpy.load', 'np.load', (['args.bimarg'], {}), '(args.bimarg)\n', (34959, 34972), True, 'import numpy as np\n'), ((35384, 35413), 'numpy.zeros', 'np.zeros', (['J.shape'], {'dtype': '"""f4"""'}), "(J.shape, dtype='f4')\n", (35392, 35413), True, 'import numpy as np\n'), ((38224, 38255), 'mcmcGPU.wgsize_heuristic', 'wgsize_heuristic', (['p.q', 'p.wgsize'], {}), '(p.q, p.wgsize)\n', (38240, 38255), False, 'from mcmcGPU import setup_GPU_context, initGPU, wgsize_heuristic, printGPUs\n'), ((39438, 39458), 'numpy.load', 'np.load', (['args.bimarg'], {}), '(args.bimarg)\n', (39445, 39458), True, 'import numpy as np\n'), ((39635, 39671), 'numpy.any', 'np.any', (['((bimarg <= 0) | (bimarg > 1))'], {}), '((bimarg <= 0) | (bimarg > 1))\n', (39641, 39671), True, 'import numpy as np\n'), ((44415, 44431), 'utils.getLq', 'getLq', (['couplings'], {}), '(couplings)\n', (44420, 44431), False, 'from utils import printsome, getLq, unimarg\n'), ((48894, 48920), 'os.path.join', 'os.path.join', (['sdir', '"""seqs"""'], {}), "(sdir, 'seqs')\n", (48906, 48920), False, 'import sys, os, 
errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((51177, 51235), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'descr', 'add_help': '(False)'}), '(description=descr, add_help=False)\n', (51200, 51235), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((1894, 1911), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1905, 1911), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((2627, 2633), 'mpmath.mpf', 'mpf', (['(0)'], {}), '(0)\n', (2630, 2633), False, 'from mpmath import mpf, mp\n'), ((9782, 9821), 'mcmcGPU.initGPU', 'initGPU', (['id', 'clinfo', 'dev', 'nwalk', 'p', 'log'], {}), '(id, clinfo, dev, nwalk, p, log)\n', (9789, 9821), False, 'from mcmcGPU import setup_GPU_context, initGPU, wgsize_heuristic, printGPUs\n'), ((10735, 10774), 'mcmcGPU.initGPU', 'initGPU', (['id', 'clinfo', 'dev', 'nwalk', 'p', 'log'], {}), '(id, clinfo, dev, nwalk, p, log)\n', (10742, 10774), False, 'from mcmcGPU import setup_GPU_context, initGPU, wgsize_heuristic, printGPUs\n'), ((11654, 11693), 'mcmcGPU.initGPU', 'initGPU', (['id', 'clinfo', 'dev', 'nwalk', 'p', 'log'], {}), '(id, clinfo, dev, nwalk, p, log)\n', (11661, 11693), False, 'from mcmcGPU import setup_GPU_context, initGPU, wgsize_heuristic, printGPUs\n'), ((14646, 14661), 'utils.getLq', 'getLq', (['p.bimarg'], {}), '(p.bimarg)\n', (14651, 14661), False, 'from utils import printsome, getLq, unimarg\n'), ((15990, 16020), 'numpy.split', 'np.split', (['seedseqs', 'gpus.ngpus'], {}), '(seedseqs, gpus.ngpus)\n', (15998, 16020), True, 'import numpy as np\n'), ((17266, 17291), 'numpy.sum', 'np.sum', (['(p.tempering == B0)'], {}), '(p.tempering == B0)\n', (17272, 17291), True, 'import numpy as np\n'), ((17329, 17348), 'numpy.sum', 'np.sum', (['(f * (1 - f))'], {}), '(f * (1 - f))\n', (17335, 17348), True, 'import numpy as np\n'), ((23611, 23641), 'numpy.split', 'np.split', (['seedseqs', 
'gpus.ngpus'], {}), '(seedseqs, gpus.ngpus)\n', (23619, 23641), True, 'import numpy as np\n'), ((24618, 24638), 'numpy.float64', 'np.float64', (['p.nsteps'], {}), '(p.nsteps)\n', (24628, 24638), True, 'import numpy as np\n'), ((27772, 27791), 'numpy.max', 'np.max', (['p.tempering'], {}), '(p.tempering)\n', (27778, 27791), True, 'import numpy as np\n'), ((28400, 28432), 'os.path.join', 'os.path.join', (['outdir', '"""bicounts"""'], {}), "(outdir, 'bicounts')\n", (28412, 28432), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((28465, 28495), 'os.path.join', 'os.path.join', (['outdir', '"""bimarg"""'], {}), "(outdir, 'bimarg')\n", (28477, 28495), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((28523, 28555), 'os.path.join', 'os.path.join', (['outdir', '"""energies"""'], {}), "(outdir, 'energies')\n", (28535, 28555), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((28588, 28616), 'os.path.join', 'os.path.join', (['outdir', '"""seqs"""'], {}), "(outdir, 'seqs')\n", (28600, 28616), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((28936, 28960), 'numpy.mean', 'np.mean', (['sampledenergies'], {}), '(sampledenergies)\n', (28943, 28960), True, 'import numpy as np\n'), ((31601, 31629), 'scipy.special.logsumexp', 'logsumexp', (['(origEs - energies)'], {}), '(origEs - energies)\n', (31610, 31629), False, 'from scipy.special import logsumexp\n'), ((35660, 35677), 'numpy.zeros', 'np.zeros', (['J.shape'], {}), '(J.shape)\n', (35668, 35677), True, 'import numpy as np\n'), ((35949, 35975), 'numpy.sum', 'np.sum', (['Rab'], {'axis': '(-1, -2)'}), '(Rab, axis=(-1, -2))\n', (35955, 35975), True, 'import numpy as np\n'), ((39482, 39497), 'numpy.dtype', 'np.dtype', (['"""<f4"""'], {}), "('<f4')\n", (39490, 39497), True, 'import numpy as np\n'), ((44703, 44756), 'numpy.repeat', 'np.repeat', (['seqs', '((n - 1) // 
seqs.shape[0] + 1)'], {'axis': '(0)'}), '(seqs, (n - 1) // seqs.shape[0] + 1, axis=0)\n', (44712, 44756), True, 'import numpy as np\n'), ((45172, 45195), 'numpy.load', 'np.load', (['args.seqbimarg'], {}), '(args.seqbimarg)\n', (45179, 45195), True, 'import numpy as np\n'), ((50712, 50728), 'mcmcGPU.printGPUs', 'printGPUs', (['print'], {}), '(print)\n', (50721, 50728), False, 'from mcmcGPU import setup_GPU_context, initGPU, wgsize_heuristic, printGPUs\n'), ((8553, 8573), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (8571, 8573), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((8608, 8631), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8629, 8631), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((10097, 10126), 'os.path.join', 'os.path.join', (['p.outdir', '"""ptx"""'], {}), "(p.outdir, 'ptx')\n", (10109, 10126), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((10856, 10874), 'node_manager.GPU_node', 'GPU_node', (['headgpus'], {}), '(headgpus)\n', (10864, 10874), False, 'from node_manager import GPU_node\n'), ((10894, 10916), 'mpi_manager.MPI_GPU_node', 'MPI_GPU_node', (['(r + 1)', 'n'], {}), '(r + 1, n)\n', (10906, 10916), False, 'from mpi_manager import MPI_multinode_controller, MPI_GPU_node, MPI_worker\n'), ((11342, 11371), 'os.path.join', 'os.path.join', (['p.outdir', '"""ptx"""'], {}), "(p.outdir, 'ptx')\n", (11354, 11371), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((13616, 13650), 'os.path.join', 'os.path.join', (['args.finish', '"""run_*"""'], {}), "(args.finish, 'run_*')\n", (13628, 13650), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((17360, 17378), 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), '(2 / np.pi)\n', (17367, 17378), True, 'import numpy as np\n'), ((17377, 17401), 'numpy.sqrt', 
'np.sqrt', (['(f * (1 - f) / N)'], {}), '(f * (1 - f) / N)\n', (17384, 17401), True, 'import numpy as np\n'), ((28731, 28764), 'os.path.join', 'os.path.join', (['outdir', '"""walker_Bs"""'], {}), "(outdir, 'walker_Bs')\n", (28743, 28764), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((28766, 28783), 'numpy.concatenate', 'np.concatenate', (['b'], {}), '(b)\n', (28780, 28783), True, 'import numpy as np\n'), ((28801, 28834), 'os.path.join', 'os.path.join', (['outdir', '"""walker_Es"""'], {}), "(outdir, 'walker_Es')\n", (28813, 28834), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((28836, 28853), 'numpy.concatenate', 'np.concatenate', (['e'], {}), '(e)\n', (28850, 28853), True, 'import numpy as np\n'), ((30782, 30803), 'numpy.cumsum', 'np.cumsum', (['gpuwalkers'], {}), '(gpuwalkers)\n', (30791, 30803), True, 'import numpy as np\n'), ((35704, 35716), 'numpy.arange', 'np.arange', (['q'], {}), '(q)\n', (35713, 35716), True, 'import numpy as np\n'), ((35718, 35730), 'numpy.arange', 'np.arange', (['q'], {}), '(q)\n', (35727, 35730), True, 'import numpy as np\n'), ((40221, 40238), 'numpy.load', 'np.load', (['rargs[0]'], {}), '(rargs[0])\n', (40228, 40238), True, 'import numpy as np\n'), ((43006, 43054), 'numpy.zeros', 'np.zeros', (['(L * (L - 1) // 2, q * q)'], {'dtype': '"""<f4"""'}), "((L * (L - 1) // 2, q * q), dtype='<f4')\n", (43014, 43054), True, 'import numpy as np\n'), ((43965, 44003), 'os.path.join', 'os.path.join', (['args.init_model', '"""J.npy"""'], {}), "(args.init_model, 'J.npy')\n", (43977, 44003), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((44015, 44033), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (44029, 44033), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((49385, 49408), 'numpy.load', 'np.load', (['args.tempering'], {}), '(args.tempering)\n', (49392, 
49408), True, 'import numpy as np\n'), ((7573, 7586), 'os.urandom', 'os.urandom', (['(4)'], {}), '(4)\n', (7583, 7586), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((8469, 8488), 'numpy.max', 'np.max', (['p.tempering'], {}), '(p.tempering)\n', (8475, 8488), True, 'import numpy as np\n'), ((8854, 8877), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8875, 8877), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((13294, 13306), 'pipes.quote', 'cmd_quote', (['a'], {}), '(a)\n', (13303, 13306), True, 'from pipes import quote as cmd_quote\n'), ((13764, 13798), 'os.path.join', 'os.path.join', (['fn', '"""perturbedJ.npy"""'], {}), "(fn, 'perturbedJ.npy')\n", (13776, 13798), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((14074, 14114), 'os.path.join', 'os.path.join', (['args.finish', '"""config.json"""'], {}), "(args.finish, 'config.json')\n", (14086, 14114), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((14312, 14352), 'os.path.join', 'os.path.join', (['args.outdir', '"""command.txt"""'], {}), "(args.outdir, 'command.txt')\n", (14324, 14352), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((25812, 25824), 'pipes.quote', 'cmd_quote', (['a'], {}), '(a)\n', (25821, 25824), True, 'from pipes import quote as cmd_quote\n'), ((39760, 39777), 'utils.printsome', 'printsome', (['bimarg'], {}), '(bimarg)\n', (39769, 39777), False, 'from utils import printsome, getLq, unimarg\n'), ((42269, 42289), 'utils.printsome', 'printsome', (['couplings'], {}), '(couplings)\n', (42278, 42289), False, 'from utils import printsome, getLq, unimarg\n'), ((42976, 42991), 'numpy.log', 'np.log', (['(1.0 / q)'], {}), '(1.0 / q)\n', (42982, 42991), True, 'import numpy as np\n'), ((43070, 43094), 'utils.changeGauge.fieldlessGaugeEven', 'fieldlessGaugeEven', 
(['h', 'J'], {}), '(h, J)\n', (43088, 43094), False, 'from utils.changeGauge import fieldlessGaugeEven\n'), ((43433, 43481), 'numpy.zeros', 'np.zeros', (['(L * (L - 1) // 2, q * q)'], {'dtype': '"""<f4"""'}), "((L * (L - 1) // 2, q * q), dtype='<f4')\n", (43441, 43481), True, 'import numpy as np\n'), ((43667, 43690), 'numpy.load', 'np.load', (['args.couplings'], {}), '(args.couplings)\n', (43674, 43690), True, 'import numpy as np\n'), ((44120, 44131), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (44127, 44131), True, 'import numpy as np\n'), ((45455, 45496), 'os.path.join', 'os.path.join', (['args.outdir', '"""initial_seqs"""'], {}), "(args.outdir, 'initial_seqs')\n", (45467, 45496), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((47730, 47760), 'numpy.random.randint', 'randint', (['(0)', 'q'], {'size': '(nseqs, L)'}), '(0, q, size=(nseqs, L))\n', (47737, 47760), False, 'from numpy.random import randint, rand\n'), ((48016, 48031), 'utils.unimarg', 'unimarg', (['bimarg'], {}), '(bimarg)\n', (48023, 48031), False, 'from utils import printsome, getLq, unimarg\n'), ((48653, 48681), 'utils.seqload.loadSeqs', 'loadSeqs', (['sfile'], {'names': 'alpha'}), '(sfile, names=alpha)\n', (48661, 48681), False, 'from utils.seqload import loadSeqs, writeSeqs\n'), ((48932, 48960), 'utils.seqload.loadSeqs', 'loadSeqs', (['sfile'], {'names': 'alpha'}), '(sfile, names=alpha)\n', (48940, 48960), False, 'from utils.seqload import loadSeqs, writeSeqs\n'), ((1985, 2004), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1998, 2004), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((43497, 43521), 'utils.changeGauge.fieldlessGaugeEven', 'fieldlessGaugeEven', (['h', 'J'], {}), '(h, J)\n', (43515, 43521), False, 'from utils.changeGauge import fieldlessGaugeEven\n'), ((43725, 43740), 'numpy.dtype', 'np.dtype', (['"""<f4"""'], {}), "('<f4')\n", (43733, 43740), True, 'import numpy as np\n'), 
((44166, 44181), 'numpy.dtype', 'np.dtype', (['"""<f4"""'], {}), "('<f4')\n", (44174, 44181), True, 'import numpy as np\n'), ((14394, 14406), 'pipes.quote', 'cmd_quote', (['a'], {}), '(a)\n', (14403, 14406), True, 'from pipes import quote as cmd_quote\n'), ((43400, 43415), 'utils.unimarg', 'unimarg', (['bimarg'], {}), '(bimarg)\n', (43407, 43415), False, 'from utils import printsome, getLq, unimarg\n'), ((47179, 47219), 'os.path.join', 'os.path.join', (['args.init_model', '"""seedseq"""'], {}), "(args.init_model, 'seedseq')\n", (47191, 47219), False, 'import sys, os, errno, time, datetime, socket, signal, atexit, glob, argparse\n'), ((48156, 48167), 'numpy.random.rand', 'rand', (['nseqs'], {}), '(nseqs)\n', (48160, 48167), False, 'from numpy.random import randint, rand\n')] |
#
# * The source code in this file is based on the soure code of CuPy.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # CuPy License #
#
# Copyright (c) 2015 Preferred Infrastructure, Inc.
# Copyright (c) 2015 Preferred Networks, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
import numpy
import nlcpy
from nlcpy import testing
@testing.parameterize(*testing.product({
    'shape': [(2,), (2, 3), (2, 3, 4)],
}))
class TestSumprod1(unittest.TestCase):
    """Whole-array sum() over several ranks, parameterized by shape."""

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_sum_all(self, xp, dtype):
        data = testing.shaped_arange(self.shape, xp, dtype)
        return xp.sum(data)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_external_sum_all(self, xp, dtype):
        data = testing.shaped_arange(self.shape, xp, dtype)
        return xp.sum(data)
class TestSumprod2(unittest.TestCase):
    """sum() with axis/axes, dtype casting, keepdims and out= variants."""

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose(rtol=1e-6)
    def test_sum_all2(self, xp, dtype):
        arr = testing.shaped_arange((20, 30, 40), xp, dtype)
        return xp.sum(arr)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_sum_all_transposed(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1)
        return xp.sum(arr)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose(rtol=1e-6)
    def test_sum_all_transposed2(self, xp, dtype):
        arr = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1)
        return xp.sum(arr)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_sum_axis(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4), xp, dtype)
        return xp.sum(arr, axis=1)

    @testing.with_requires('numpy>=1.10')
    @testing.numpy_nlcpy_allclose(rtol=1e-5)
    def test_sum_axis_huge(self, xp):
        # broadcast_to keeps memory bounded while exercising a huge shape
        arr = testing.shaped_random((2048, 1, 1024), xp, 'f4')
        arr = xp.broadcast_to(arr, (2048, 1024, 1024))
        return xp.sum(arr, axis=2)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_external_sum_axis(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4), xp, dtype)
        return xp.sum(arr, axis=1)

    @testing.for_all_dtypes(no_float16=True)
    @testing.numpy_nlcpy_allclose()
    def test_sum_axis2(self, xp, dtype):
        arr = testing.shaped_arange((20, 30, 40), xp, dtype)
        return xp.sum(arr, axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose(contiguous_check=False)
    def test_sum_axis_transposed(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1)
        return xp.sum(arr, axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose(contiguous_check=False)
    def test_sum_axis_transposed2(self, xp, dtype):
        arr = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1)
        return xp.sum(arr, axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_sum_axes(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4, 5), xp, dtype)
        return xp.sum(arr, axis=(1, 3))

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose(rtol=1e-4)
    def test_sum_axes2(self, xp, dtype):
        arr = testing.shaped_arange((20, 30, 40, 50), xp, dtype)
        return xp.sum(arr, axis=(1, 3))

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose(rtol=1e-6)
    def test_sum_axes3(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4, 5), xp, dtype)
        return xp.sum(arr, axis=(0, 2, 3))

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose(rtol=1e-6)
    def test_sum_axes4(self, xp, dtype):
        arr = testing.shaped_arange((20, 30, 40, 50), xp, dtype)
        return xp.sum(arr, axis=(0, 2, 3))

    @testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype'])
    @testing.numpy_nlcpy_allclose()
    def test_sum_dtype(self, xp, src_dtype, dst_dtype):
        if not numpy.can_cast(src_dtype, dst_dtype):
            return xp.array([])  # skip
        arr = testing.shaped_arange((2, 3, 4), xp, src_dtype)
        return xp.sum(arr, dtype=dst_dtype)

    @testing.numpy_nlcpy_allclose()
    def test_sum_keepdims(self, xp):
        arr = testing.shaped_arange((2, 3, 4), xp)
        return xp.sum(arr, axis=1, keepdims=True)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_sum_out(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4), xp, dtype)
        out = xp.empty((2, 4), dtype=dtype)
        xp.sum(arr, axis=1, out=out)
        return out

    def test_sum_out_wrong_shape(self):
        # a mismatched out= shape is expected to raise NotImplementedError
        arr = testing.shaped_arange((2, 3, 4))
        out = nlcpy.empty((2, 3))
        with self.assertRaises(NotImplementedError):
            nlcpy.sum(arr, axis=1, out=out)
axes = [0, 1, 2]


@testing.parameterize(*testing.product({'axis': axes}))
class TestCumsum(unittest.TestCase):
    """cumsum() in 1/2 dims, along each parameterized axis, and its errors."""

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_cumsum(self, xp, dtype):
        arr = testing.shaped_arange((5,), xp, dtype)
        return xp.cumsum(arr)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_cumsum_2dim(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        return xp.cumsum(arr)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose(contiguous_check=False)
    def test_cumsum_axis(self, xp, dtype):
        ndim = len(axes)
        arr = testing.shaped_arange(tuple(range(4, 4 + ndim)), xp, dtype)
        return xp.cumsum(arr, axis=self.axis)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose(accept_error=nlcpy.core.error._AxisError)
    def test_cumsum_axis_empty(self, xp, dtype):
        # shape (0, 1, 2): an empty array, axis errors are accepted
        ndim = len(axes)
        arr = testing.shaped_arange(tuple(range(0, ndim)), xp, dtype)
        return xp.cumsum(arr, axis=self.axis)

    @testing.for_all_dtypes()
    @testing.with_requires('numpy>=1.13')
    @testing.numpy_nlcpy_raises()
    def test_invalid_axis_lower1(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        return xp.cumsum(arr, axis=-arr.ndim - 1)

    @testing.for_all_dtypes()
    def test_invalid_axis_lower2(self, dtype):
        arr = testing.shaped_arange((4, 5), nlcpy, dtype)
        with self.assertRaises(nlcpy.core.error._AxisError):
            return nlcpy.cumsum(arr, axis=-arr.ndim - 1)

    @testing.for_all_dtypes()
    @testing.with_requires('numpy>=1.13')
    @testing.numpy_nlcpy_raises()
    def test_invalid_axis_upper1(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        return xp.cumsum(arr, axis=arr.ndim + 1)

    @testing.for_all_dtypes()
    def test_invalid_axis_upper2(self, dtype):
        arr = testing.shaped_arange((4, 5), nlcpy, dtype)
        with self.assertRaises(nlcpy.core.error._AxisError):
            return nlcpy.cumsum(arr, axis=arr.ndim + 1)

    @testing.numpy_nlcpy_allclose()
    def test_cumsum_arraylike(self, xp):
        return xp.cumsum((1, 2, 3))

    @testing.for_float_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_cumsum_numpy_array(self, xp, dtype):
        host = numpy.arange(8, dtype=dtype)
        return xp.cumsum(host)
@testing.with_requires('numpy>=1.14')  # NumPy issue #9251
class TestDiff(unittest.TestCase):
    """diff() in 1/2 dims with n, axis, prepend and append variations."""

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_diff_1dim(self, xp, dtype):
        arr = testing.shaped_arange((5,), xp, dtype)
        return xp.diff(arr)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_diff_1dim_with_n(self, xp, dtype):
        arr = testing.shaped_arange((5,), xp, dtype)
        return xp.diff(arr, n=3)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_diff_2dim_without_axis(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        return xp.diff(arr)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_diff_2dim_with_axis(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        return xp.diff(arr, axis=-2)

    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_diff_2dim_with_n_and_axis(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        return xp.diff(arr, 2, 1)

    @testing.with_requires('numpy>=1.16')
    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_diff_2dim_with_prepend(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        pre = testing.shaped_arange((4, 1), xp, dtype)
        return xp.diff(arr, axis=-1, prepend=pre)

    @testing.with_requires('numpy>=1.16')
    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_diff_2dim_with_append(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        app = testing.shaped_arange((1, 5), xp, dtype)
        return xp.diff(arr, axis=0, append=app, n=2)

    @testing.with_requires('numpy>=1.16')
    @testing.for_all_dtypes()
    @testing.numpy_nlcpy_allclose()
    def test_diff_2dim_with_scalar_append(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        return xp.diff(arr, prepend=1, append=0)
| [
"nlcpy.testing.for_all_dtypes_combination",
"nlcpy.testing.product",
"nlcpy.testing.shaped_arange",
"nlcpy.testing.shaped_random",
"nlcpy.sum",
"nlcpy.testing.for_float_dtypes",
"nlcpy.testing.numpy_nlcpy_raises",
"numpy.can_cast",
"nlcpy.testing.for_all_dtypes",
"nlcpy.cumsum",
"nlcpy.testing.w... | [((9751, 9787), 'nlcpy.testing.with_requires', 'testing.with_requires', (['"""numpy>=1.14"""'], {}), "('numpy>=1.14')\n", (9772, 9787), False, 'from nlcpy import testing\n'), ((3071, 3095), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (3093, 3095), False, 'from nlcpy import testing\n'), ((3101, 3131), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (3129, 3131), False, 'from nlcpy import testing\n'), ((3259, 3283), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (3281, 3283), False, 'from nlcpy import testing\n'), ((3289, 3319), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (3317, 3319), False, 'from nlcpy import testing\n'), ((3497, 3521), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (3519, 3521), False, 'from nlcpy import testing\n'), ((3527, 3567), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'rtol': '(1e-06)'}), '(rtol=1e-06)\n', (3555, 3567), False, 'from nlcpy import testing\n'), ((3697, 3721), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (3719, 3721), False, 'from nlcpy import testing\n'), ((3727, 3757), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (3755, 3757), False, 'from nlcpy import testing\n'), ((3914, 3938), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (3936, 3938), False, 'from nlcpy import testing\n'), ((3944, 3984), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'rtol': '(1e-06)'}), '(rtol=1e-06)\n', (3972, 3984), False, 'from nlcpy import testing\n'), ((4144, 4168), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (4166, 4168), False, 'from nlcpy import testing\n'), ((4174, 4204), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', 
(4202, 4204), False, 'from nlcpy import testing\n'), ((4340, 4376), 'nlcpy.testing.with_requires', 'testing.with_requires', (['"""numpy>=1.10"""'], {}), "('numpy>=1.10')\n", (4361, 4376), False, 'from nlcpy import testing\n'), ((4382, 4422), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'rtol': '(1e-05)'}), '(rtol=1e-05)\n', (4410, 4422), False, 'from nlcpy import testing\n'), ((4611, 4635), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (4633, 4635), False, 'from nlcpy import testing\n'), ((4641, 4671), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (4669, 4671), False, 'from nlcpy import testing\n'), ((4816, 4855), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_float16': '(True)'}), '(no_float16=True)\n', (4838, 4855), False, 'from nlcpy import testing\n'), ((4861, 4891), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (4889, 4891), False, 'from nlcpy import testing\n'), ((5031, 5055), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (5053, 5055), False, 'from nlcpy import testing\n'), ((5061, 5113), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'contiguous_check': '(False)'}), '(contiguous_check=False)\n', (5089, 5113), False, 'from nlcpy import testing\n'), ((5279, 5303), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (5301, 5303), False, 'from nlcpy import testing\n'), ((5309, 5361), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'contiguous_check': '(False)'}), '(contiguous_check=False)\n', (5337, 5361), False, 'from nlcpy import testing\n'), ((5531, 5555), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (5553, 5555), False, 'from nlcpy import testing\n'), ((5561, 5591), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', 
(5589, 5591), False, 'from nlcpy import testing\n'), ((5735, 5759), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (5757, 5759), False, 'from nlcpy import testing\n'), ((5765, 5806), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'rtol': '(0.0001)'}), '(rtol=0.0001)\n', (5793, 5806), False, 'from nlcpy import testing\n'), ((5953, 5977), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (5975, 5977), False, 'from nlcpy import testing\n'), ((5983, 6023), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'rtol': '(1e-06)'}), '(rtol=1e-06)\n', (6011, 6023), False, 'from nlcpy import testing\n'), ((6170, 6194), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (6192, 6194), False, 'from nlcpy import testing\n'), ((6200, 6240), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'rtol': '(1e-06)'}), '(rtol=1e-06)\n', (6228, 6240), False, 'from nlcpy import testing\n'), ((6391, 6459), 'nlcpy.testing.for_all_dtypes_combination', 'testing.for_all_dtypes_combination', ([], {'names': "['src_dtype', 'dst_dtype']"}), "(names=['src_dtype', 'dst_dtype'])\n", (6425, 6459), False, 'from nlcpy import testing\n'), ((6465, 6495), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (6493, 6495), False, 'from nlcpy import testing\n'), ((6753, 6783), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (6781, 6783), False, 'from nlcpy import testing\n'), ((6924, 6948), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (6946, 6948), False, 'from nlcpy import testing\n'), ((6954, 6984), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (6982, 6984), False, 'from nlcpy import testing\n'), ((7503, 7527), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (7525, 7527), False, 
'from nlcpy import testing\n'), ((7533, 7563), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (7561, 7563), False, 'from nlcpy import testing\n'), ((7687, 7711), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (7709, 7711), False, 'from nlcpy import testing\n'), ((7717, 7747), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (7745, 7747), False, 'from nlcpy import testing\n'), ((7878, 7902), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (7900, 7902), False, 'from nlcpy import testing\n'), ((7908, 7960), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'contiguous_check': '(False)'}), '(contiguous_check=False)\n', (7936, 7960), False, 'from nlcpy import testing\n'), ((8145, 8169), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (8167, 8169), False, 'from nlcpy import testing\n'), ((8175, 8245), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {'accept_error': 'nlcpy.core.error._AxisError'}), '(accept_error=nlcpy.core.error._AxisError)\n', (8203, 8245), False, 'from nlcpy import testing\n'), ((8432, 8456), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (8454, 8456), False, 'from nlcpy import testing\n'), ((8462, 8498), 'nlcpy.testing.with_requires', 'testing.with_requires', (['"""numpy>=1.13"""'], {}), "('numpy>=1.13')\n", (8483, 8498), False, 'from nlcpy import testing\n'), ((8504, 8532), 'nlcpy.testing.numpy_nlcpy_raises', 'testing.numpy_nlcpy_raises', ([], {}), '()\n', (8530, 8532), False, 'from nlcpy import testing\n'), ((8689, 8713), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (8711, 8713), False, 'from nlcpy import testing\n'), ((8937, 8961), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (8959, 8961), False, 'from nlcpy import testing\n'), ((8967, 9003), 
'nlcpy.testing.with_requires', 'testing.with_requires', (['"""numpy>=1.13"""'], {}), "('numpy>=1.13')\n", (8988, 9003), False, 'from nlcpy import testing\n'), ((9009, 9037), 'nlcpy.testing.numpy_nlcpy_raises', 'testing.numpy_nlcpy_raises', ([], {}), '()\n', (9035, 9037), False, 'from nlcpy import testing\n'), ((9193, 9217), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (9215, 9217), False, 'from nlcpy import testing\n'), ((9440, 9470), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (9468, 9470), False, 'from nlcpy import testing\n'), ((9554, 9580), 'nlcpy.testing.for_float_dtypes', 'testing.for_float_dtypes', ([], {}), '()\n', (9578, 9580), False, 'from nlcpy import testing\n'), ((9586, 9616), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (9614, 9616), False, 'from nlcpy import testing\n'), ((9850, 9874), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (9872, 9874), False, 'from nlcpy import testing\n'), ((9880, 9910), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (9908, 9910), False, 'from nlcpy import testing\n'), ((10035, 10059), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (10057, 10059), False, 'from nlcpy import testing\n'), ((10065, 10095), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (10093, 10095), False, 'from nlcpy import testing\n'), ((10232, 10256), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (10254, 10256), False, 'from nlcpy import testing\n'), ((10262, 10292), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (10290, 10292), False, 'from nlcpy import testing\n'), ((10432, 10456), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (10454, 10456), False, 'from nlcpy import testing\n'), ((10462, 10492), 
'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (10490, 10492), False, 'from nlcpy import testing\n'), ((10638, 10662), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (10660, 10662), False, 'from nlcpy import testing\n'), ((10668, 10698), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (10696, 10698), False, 'from nlcpy import testing\n'), ((10847, 10883), 'nlcpy.testing.with_requires', 'testing.with_requires', (['"""numpy>=1.16"""'], {}), "('numpy>=1.16')\n", (10868, 10883), False, 'from nlcpy import testing\n'), ((10889, 10913), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (10911, 10913), False, 'from nlcpy import testing\n'), ((10919, 10949), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (10947, 10949), False, 'from nlcpy import testing\n'), ((11162, 11198), 'nlcpy.testing.with_requires', 'testing.with_requires', (['"""numpy>=1.16"""'], {}), "('numpy>=1.16')\n", (11183, 11198), False, 'from nlcpy import testing\n'), ((11204, 11228), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (11226, 11228), False, 'from nlcpy import testing\n'), ((11234, 11264), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (11262, 11264), False, 'from nlcpy import testing\n'), ((11479, 11515), 'nlcpy.testing.with_requires', 'testing.with_requires', (['"""numpy>=1.16"""'], {}), "('numpy>=1.16')\n", (11500, 11515), False, 'from nlcpy import testing\n'), ((11521, 11545), 'nlcpy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (11543, 11545), False, 'from nlcpy import testing\n'), ((11551, 11581), 'nlcpy.testing.numpy_nlcpy_allclose', 'testing.numpy_nlcpy_allclose', ([], {}), '()\n', (11579, 11581), False, 'from nlcpy import testing\n'), ((3183, 3227), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['self.shape', 
'xp', 'dtype'], {}), '(self.shape, xp, dtype)\n', (3204, 3227), False, 'from nlcpy import testing\n'), ((3380, 3424), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['self.shape', 'xp', 'dtype'], {}), '(self.shape, xp, dtype)\n', (3401, 3424), False, 'from nlcpy import testing\n'), ((2954, 3007), 'nlcpy.testing.product', 'testing.product', (["{'shape': [(2,), (2, 3), (2, 3, 4)]}"], {}), "({'shape': [(2,), (2, 3), (2, 3, 4)]})\n", (2969, 3007), False, 'from nlcpy import testing\n'), ((3619, 3665), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(20, 30, 40)', 'xp', 'dtype'], {}), '((20, 30, 40), xp, dtype)\n', (3640, 3665), False, 'from nlcpy import testing\n'), ((4257, 4300), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (4278, 4300), False, 'from nlcpy import testing\n'), ((4472, 4520), 'nlcpy.testing.shaped_random', 'testing.shaped_random', (['(2048, 1, 1024)', 'xp', '"""f4"""'], {}), "((2048, 1, 1024), xp, 'f4')\n", (4493, 4520), False, 'from nlcpy import testing\n'), ((4733, 4776), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (4754, 4776), False, 'from nlcpy import testing\n'), ((4945, 4991), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(20, 30, 40)', 'xp', 'dtype'], {}), '((20, 30, 40), xp, dtype)\n', (4966, 4991), False, 'from nlcpy import testing\n'), ((5644, 5690), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)', 'xp', 'dtype'], {}), '((2, 3, 4, 5), xp, dtype)\n', (5665, 5690), False, 'from nlcpy import testing\n'), ((5858, 5908), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(20, 30, 40, 50)', 'xp', 'dtype'], {}), '((20, 30, 40, 50), xp, dtype)\n', (5879, 5908), False, 'from nlcpy import testing\n'), ((6076, 6122), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4, 5)', 'xp', 'dtype'], {}), '((2, 3, 4, 5), xp, dtype)\n', 
(6097, 6122), False, 'from nlcpy import testing\n'), ((6293, 6343), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(20, 30, 40, 50)', 'xp', 'dtype'], {}), '((20, 30, 40, 50), xp, dtype)\n', (6314, 6343), False, 'from nlcpy import testing\n'), ((6657, 6704), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)', 'xp', 'src_dtype'], {}), '((2, 3, 4), xp, src_dtype)\n', (6678, 6704), False, 'from nlcpy import testing\n'), ((6833, 6869), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)', 'xp'], {}), '((2, 3, 4), xp)\n', (6854, 6869), False, 'from nlcpy import testing\n'), ((7036, 7079), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (7057, 7079), False, 'from nlcpy import testing\n'), ((7225, 7257), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)'], {}), '((2, 3, 4))\n', (7246, 7257), False, 'from nlcpy import testing\n'), ((7270, 7289), 'nlcpy.empty', 'nlcpy.empty', (['(2, 3)'], {}), '((2, 3))\n', (7281, 7289), False, 'import nlcpy\n'), ((7614, 7652), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(5,)', 'xp', 'dtype'], {}), '((5,), xp, dtype)\n', (7635, 7652), False, 'from nlcpy import testing\n'), ((7803, 7843), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'xp', 'dtype'], {}), '((4, 5), xp, dtype)\n', (7824, 7843), False, 'from nlcpy import testing\n'), ((8596, 8636), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'xp', 'dtype'], {}), '((4, 5), xp, dtype)\n', (8617, 8636), False, 'from nlcpy import testing\n'), ((8773, 8816), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'nlcpy', 'dtype'], {}), '((4, 5), nlcpy, dtype)\n', (8794, 8816), False, 'from nlcpy import testing\n'), ((9101, 9141), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'xp', 'dtype'], {}), '((4, 5), xp, dtype)\n', (9122, 9141), False, 'from nlcpy import 
testing\n'), ((9277, 9320), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'nlcpy', 'dtype'], {}), '((4, 5), nlcpy, dtype)\n', (9298, 9320), False, 'from nlcpy import testing\n'), ((9685, 9713), 'numpy.arange', 'numpy.arange', (['(8)'], {'dtype': 'dtype'}), '(8, dtype=dtype)\n', (9697, 9713), False, 'import numpy\n'), ((7427, 7458), 'nlcpy.testing.product', 'testing.product', (["{'axis': axes}"], {}), "({'axis': axes})\n", (7442, 7458), False, 'from nlcpy import testing\n'), ((9964, 10002), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(5,)', 'xp', 'dtype'], {}), '((5,), xp, dtype)\n', (9985, 10002), False, 'from nlcpy import testing\n'), ((10156, 10194), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(5,)', 'xp', 'dtype'], {}), '((5,), xp, dtype)\n', (10177, 10194), False, 'from nlcpy import testing\n'), ((10359, 10399), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'xp', 'dtype'], {}), '((4, 5), xp, dtype)\n', (10380, 10399), False, 'from nlcpy import testing\n'), ((10556, 10596), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'xp', 'dtype'], {}), '((4, 5), xp, dtype)\n', (10577, 10596), False, 'from nlcpy import testing\n'), ((10768, 10808), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'xp', 'dtype'], {}), '((4, 5), xp, dtype)\n', (10789, 10808), False, 'from nlcpy import testing\n'), ((11016, 11056), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'xp', 'dtype'], {}), '((4, 5), xp, dtype)\n', (11037, 11056), False, 'from nlcpy import testing\n'), ((11069, 11109), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 1)', 'xp', 'dtype'], {}), '((4, 1), xp, dtype)\n', (11090, 11109), False, 'from nlcpy import testing\n'), ((11330, 11370), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'xp', 'dtype'], {}), '((4, 5), xp, dtype)\n', (11351, 11370), False, 'from nlcpy import testing\n'), ((11383, 11423), 
'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(1, 5)', 'xp', 'dtype'], {}), '((1, 5), xp, dtype)\n', (11404, 11423), False, 'from nlcpy import testing\n'), ((11654, 11694), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(4, 5)', 'xp', 'dtype'], {}), '((4, 5), xp, dtype)\n', (11675, 11694), False, 'from nlcpy import testing\n'), ((6567, 6603), 'numpy.can_cast', 'numpy.can_cast', (['src_dtype', 'dst_dtype'], {}), '(src_dtype, dst_dtype)\n', (6581, 6603), False, 'import numpy\n'), ((7355, 7382), 'nlcpy.sum', 'nlcpy.sum', (['a'], {'axis': '(1)', 'out': 'b'}), '(a, axis=1, out=b)\n', (7364, 7382), False, 'import nlcpy\n'), ((8897, 8930), 'nlcpy.cumsum', 'nlcpy.cumsum', (['a'], {'axis': '(-a.ndim - 1)'}), '(a, axis=-a.ndim - 1)\n', (8909, 8930), False, 'import nlcpy\n'), ((9401, 9433), 'nlcpy.cumsum', 'nlcpy.cumsum', (['a'], {'axis': '(a.ndim + 1)'}), '(a, axis=a.ndim + 1)\n', (9413, 9433), False, 'import nlcpy\n'), ((3820, 3863), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (3841, 3863), False, 'from nlcpy import testing\n'), ((4047, 4093), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(20, 30, 40)', 'xp', 'dtype'], {}), '((20, 30, 40), xp, dtype)\n', (4068, 4093), False, 'from nlcpy import testing\n'), ((5177, 5220), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(2, 3, 4)', 'xp', 'dtype'], {}), '((2, 3, 4), xp, dtype)\n', (5198, 5220), False, 'from nlcpy import testing\n'), ((5426, 5472), 'nlcpy.testing.shaped_arange', 'testing.shaped_arange', (['(20, 30, 40)', 'xp', 'dtype'], {}), '((20, 30, 40), xp, dtype)\n', (5447, 5472), False, 'from nlcpy import testing\n')] |
import os
# Disable Tensorflow's INFO and WARNING messages
# See http://stackoverflow.com/questions/35911252
if 'TF_CPP_MIN_LOG_LEVEL' not in os.environ:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import numpy.random
import os.path
import random
import tensorflow as tf
import dm_celeba
import dm_flags
import dm_infer
import dm_input
import dm_model
import dm_show
import dm_train
import dm_utils
FLAGS = tf.app.flags.FLAGS
def _setup_tensorflow():
    """Create a TF session and seed every RNG for reproducible runs."""
    session_config = tf.ConfigProto(log_device_placement=False)  #, intra_op_parallelism_threads=1)
    session = tf.Session(config=session_config)

    # Seed TensorFlow's graph-level RNG, then Python's and NumPy's,
    # all from the same flag so a run is fully deterministic.
    with session.graph.as_default():
        tf.set_random_seed(FLAGS.random_seed)
    random.seed(FLAGS.random_seed)
    np.random.seed(FLAGS.random_seed)

    return session
# TBD: Move to dm_train.py?
def _prepare_train_dirs():
    """Create the checkpoint dir, recreate the train dir, and validate the dataset.

    Raises:
        FileNotFoundError: if FLAGS.dataset does not exist or is not a directory.
    """
    # Create checkpoint dir (do not delete anything: it may hold trained weights)
    if not tf.gfile.Exists(FLAGS.checkpoint_dir):
        tf.gfile.MakeDirs(FLAGS.checkpoint_dir)

    # Recreate the train dir from scratch; deletion is best-effort because the
    # directory may not be removable on some filesystems.
    if tf.gfile.Exists(FLAGS.train_dir):
        try:
            tf.gfile.DeleteRecursively(FLAGS.train_dir)
        except Exception:
            # Bug fix: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; keep the original best-effort semantics.
            pass
    tf.gfile.MakeDirs(FLAGS.train_dir)

    # Ensure dataset folder exists
    if not tf.gfile.Exists(FLAGS.dataset) or \
       not tf.gfile.IsDirectory(FLAGS.dataset):
        raise FileNotFoundError("Could not find folder `%s'" % (FLAGS.dataset,))
# TBD: Move to dm_train.py?
def _get_train_data():
    """Build the input pipelines and train/test models for training.

    Returns a ``dm_utils.Container`` wrapping *every local variable* of this
    function (sess, source/target/test image queues, both models, the
    annealing ops, ...) via the ``locals()`` capture at the end -- which is
    why the local variable names in this function are part of its contract.
    """
    # Setup global tensorflow state
    sess = _setup_tensorflow()

    # Prepare directories
    _prepare_train_dirs()

    # Which type of transformation?  Mode naming: f=female, m=male,
    # 't'/'2' = "to" (so 'ftm' and 'f2m' are synonyms).
    # Note: eyeglasses and sunglasses are filtered out because they tend to produce artifacts
    if FLAGS.train_mode == 'ftm' or FLAGS.train_mode == 'f2m':
        # Trans filter: from female to attractive male
        # Note: removed facial hair from target images because otherwise the network becomes overly focused on rendering facial hair
        source_filter = {'Male':False, 'Blurry':False, 'Eyeglasses':False}
        target_filter = {'Male':True, 'Blurry':False, 'Eyeglasses':False, 'Attractive':True, 'Goatee':False, 'Mustache':False, 'No_Beard':True}
    elif FLAGS.train_mode == 'mtf' or FLAGS.train_mode == 'm2f':
        # Trans filter: from male to attractive female
        source_filter = {'Male':True, 'Blurry':False, 'Eyeglasses':False}
        target_filter = {'Male':False, 'Blurry':False, 'Eyeglasses':False, 'Attractive':True}
    elif FLAGS.train_mode == 'ftf' or FLAGS.train_mode == 'f2f':
        # Vanity filter: from female to attractive female
        source_filter = {'Male':False, 'Blurry':False, 'Eyeglasses':False}
        target_filter = {'Male':False, 'Blurry':False, 'Eyeglasses':False, 'Attractive':True}
    elif FLAGS.train_mode == "mtm" or FLAGS.train_mode == 'm2m':
        # Vanity filter: from male to attractive male
        source_filter = {'Male':True, 'Blurry':False, 'Eyeglasses':False}
        target_filter = {'Male':True, 'Blurry':False, 'Eyeglasses':False, 'Attractive':True}
    else:
        raise ValueError('`train_mode` must be one of: `ftm`, `mtf`, `ftf` or `mtm`')

    # Setup async input queues.  Note that train and test images both come
    # from the *source* distribution; target images get their own selection.
    selected      = dm_celeba.select_samples(source_filter)
    source_images = dm_input.input_data(sess, 'train', selected)
    test_images   = dm_input.input_data(sess, 'test', selected)
    print('%8d source images selected' % (len(selected),))

    selected      = dm_celeba.select_samples(target_filter)
    target_images = dm_input.input_data(sess, 'train', selected)
    print('%8d target images selected' % (len(selected),))
    print()

    # Annealing temperature: starts at 1.0 and decreases exponentially over time
    annealing = tf.Variable(initial_value=1.0, trainable=False, name='annealing')
    halve_annealing = tf.assign(annealing, 0.5*annealing)

    # Create and initialize training and testing models
    train_model = dm_model.create_model(sess, source_images, target_images, annealing, verbose=True)

    print("Building testing model...")
    test_model  = dm_model.create_model(sess, test_images, None, annealing)
    print("Done.")

    # Forget this line and TF will deadlock at the beginning of training
    tf.train.start_queue_runners(sess=sess)

    # Pack all for convenience: captures every local defined above.
    train_data = dm_utils.Container(locals())

    return train_data
# TBD: Move to dm_infer.py?
def _get_inference_data():
    """Load the inference input image and restore a trained model.

    Returns a ``dm_utils.Container`` of this function's locals (sess,
    infer_images, infer_model, ...).  ``saver`` and ``checkpoint`` are
    explicitly ``del``'d so they are not swept up by the ``locals()``
    capture; keep that in mind before introducing new temporaries here.

    Raises:
        ValueError: if --infile was not supplied.
        FileNotFoundError: if --infile does not name an existing file.
        RuntimeError: if the checkpoint cannot be read.
    """
    # Setup global tensorflow state
    sess = _setup_tensorflow()

    # Load single image to use for inference
    if FLAGS.infile is None:
        raise ValueError('Must specify inference input file through `--infile <filename>` command line argument')

    if not tf.gfile.Exists(FLAGS.infile) or tf.gfile.IsDirectory(FLAGS.infile):
        raise FileNotFoundError('File `%s` does not exist or is a directory' % (FLAGS.infile,))

    filenames = [FLAGS.infile]
    infer_images = dm_input.input_data(sess, 'inference', filenames)

    print('Loading model...')
    # Create inference model
    infer_model = dm_model.create_model(sess, infer_images)

    # Load model parameters from checkpoint
    checkpoint = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    try:
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint.model_checkpoint_path)
        # Remove these so the locals() capture below does not retain them.
        del saver
        del checkpoint
    except Exception:
        # Bug fix: was a bare `except:` that also caught SystemExit and
        # KeyboardInterrupt.  `Exception` still covers the AttributeError when
        # `checkpoint` is None as well as TF restore failures.
        raise RuntimeError('Unable to read checkpoint from `%s`' % (FLAGS.checkpoint_dir,))
    print('Done.')

    # Pack all for convenience
    infer_data = dm_utils.Container(locals())

    return infer_data
def main(argv=None):
    """Entry point: dispatch to training or inference based on the --run flag."""
    run_mode = FLAGS.run
    if run_mode == 'train':
        dm_train.train_model(_get_train_data())
    elif run_mode == 'inference':
        dm_infer.inference(_get_inference_data())
    else:
        print("Operation `%s` not supported" % (run_mode,))


if __name__ == '__main__':
    dm_flags.define_flags()
    tf.app.run()
| [
"tensorflow.gfile.Exists",
"numpy.random.seed",
"tensorflow.ConfigProto",
"tensorflow.assign",
"tensorflow.Variable",
"tensorflow.set_random_seed",
"tensorflow.train.start_queue_runners",
"random.seed",
"tensorflow.gfile.DeleteRecursively",
"tensorflow.app.run",
"tensorflow.train.get_checkpoint_... | [((511, 553), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'log_device_placement': '(False)'}), '(log_device_placement=False)\n', (525, 553), True, 'import tensorflow as tf\n'), ((602, 627), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (612, 627), True, 'import tensorflow as tf\n'), ((770, 800), 'random.seed', 'random.seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (781, 800), False, 'import random\n'), ((805, 838), 'numpy.random.seed', 'np.random.seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (819, 838), True, 'import numpy as np\n'), ((1100, 1132), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (1115, 1132), True, 'import tensorflow as tf\n'), ((1240, 1274), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (1257, 1274), True, 'import tensorflow as tf\n'), ((3279, 3318), 'dm_celeba.select_samples', 'dm_celeba.select_samples', (['source_filter'], {}), '(source_filter)\n', (3303, 3318), False, 'import dm_celeba\n'), ((3339, 3383), 'dm_input.input_data', 'dm_input.input_data', (['sess', '"""train"""', 'selected'], {}), "(sess, 'train', selected)\n", (3358, 3383), False, 'import dm_input\n'), ((3404, 3447), 'dm_input.input_data', 'dm_input.input_data', (['sess', '"""test"""', 'selected'], {}), "(sess, 'test', selected)\n", (3423, 3447), False, 'import dm_input\n'), ((3528, 3567), 'dm_celeba.select_samples', 'dm_celeba.select_samples', (['target_filter'], {}), '(target_filter)\n', (3552, 3567), False, 'import dm_celeba\n'), ((3588, 3632), 'dm_input.input_data', 'dm_input.input_data', (['sess', '"""train"""', 'selected'], {}), "(sess, 'train', selected)\n", (3607, 3632), False, 'import dm_input\n'), ((3802, 3867), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(1.0)', 'trainable': '(False)', 'name': '"""annealing"""'}), "(initial_value=1.0, 
trainable=False, name='annealing')\n", (3813, 3867), True, 'import tensorflow as tf\n'), ((3890, 3927), 'tensorflow.assign', 'tf.assign', (['annealing', '(0.5 * annealing)'], {}), '(annealing, 0.5 * annealing)\n', (3899, 3927), True, 'import tensorflow as tf\n'), ((4002, 4088), 'dm_model.create_model', 'dm_model.create_model', (['sess', 'source_images', 'target_images', 'annealing'], {'verbose': '(True)'}), '(sess, source_images, target_images, annealing,\n verbose=True)\n', (4023, 4088), False, 'import dm_model\n'), ((4144, 4201), 'dm_model.create_model', 'dm_model.create_model', (['sess', 'test_images', 'None', 'annealing'], {}), '(sess, test_images, None, annealing)\n', (4165, 4201), False, 'import dm_model\n'), ((4303, 4342), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess'}), '(sess=sess)\n', (4331, 4342), True, 'import tensorflow as tf\n'), ((5017, 5066), 'dm_input.input_data', 'dm_input.input_data', (['sess', '"""inference"""', 'filenames'], {}), "(sess, 'inference', filenames)\n", (5036, 5066), False, 'import dm_input\n'), ((5146, 5187), 'dm_model.create_model', 'dm_model.create_model', (['sess', 'infer_images'], {}), '(sess, infer_images)\n', (5167, 5187), False, 'import dm_model\n'), ((5250, 5301), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (5279, 5301), True, 'import tensorflow as tf\n'), ((6023, 6046), 'dm_flags.define_flags', 'dm_flags.define_flags', ([], {}), '()\n', (6044, 6046), False, 'import dm_flags\n'), ((6051, 6063), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (6061, 6063), True, 'import tensorflow as tf\n'), ((723, 760), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['FLAGS.random_seed'], {}), '(FLAGS.random_seed)\n', (741, 760), True, 'import tensorflow as tf\n'), ((977, 1014), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (992, 
1014), True, 'import tensorflow as tf\n'), ((1024, 1063), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (1041, 1063), True, 'import tensorflow as tf\n'), ((4827, 4861), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['FLAGS.infile'], {}), '(FLAGS.infile)\n', (4847, 4861), True, 'import tensorflow as tf\n'), ((5327, 5343), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5341, 5343), True, 'import tensorflow as tf\n'), ((5770, 5802), 'dm_train.train_model', 'dm_train.train_model', (['train_data'], {}), '(train_data)\n', (5790, 5802), False, 'import dm_train\n'), ((1159, 1202), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (1185, 1202), True, 'import tensorflow as tf\n'), ((1322, 1352), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.dataset'], {}), '(FLAGS.dataset)\n', (1337, 1352), True, 'import tensorflow as tf\n'), ((1369, 1404), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['FLAGS.dataset'], {}), '(FLAGS.dataset)\n', (1389, 1404), True, 'import tensorflow as tf\n'), ((4794, 4823), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['FLAGS.infile'], {}), '(FLAGS.infile)\n', (4809, 4823), True, 'import tensorflow as tf\n'), ((5889, 5919), 'dm_infer.inference', 'dm_infer.inference', (['infer_data'], {}), '(infer_data)\n', (5907, 5919), False, 'import dm_infer\n')] |
import time
import numpy as np
from .integrators import BackwardsEuler, make_symmetric_random
# ==================================
class SimulatorEnv:
"""class for simulation environment
Usage:
env = SimulatorEnv(coefficient, init_state, target_state, time_limit, euler_limit, delta, eps_euler, eps_target, lambda_distance_reward)
next_state, reward, done, info, distance_to_target = env(action)
See `test_simulator` for concrete example.
"""
def __init__(self, coefficient, init_state, target_state, original_perturb,
action_index, max_action, time_limit, euler_limit, delta, eps_euler, eps_target,
lambda_distance_reward, goal_condition, random):
self.num_genes = coefficient.shape[0]
self.coefficient = coefficient # np.array [num_genes, num_genes]
self.original_perturb = original_perturb # original perturb, if certain genes is not actionable through action, it remains the original perturbation term
self.target_state = target_state # np.array [num_genes,]
self.init_state = init_state # np.array [num_genes,]
self.state_space = len(self.init_state)
self.time_limit = time_limit # time limit for an episode, one eposode corresponds to certain number of actions
self.euler_limit = euler_limit # the maximum delta_t can take for integration when calling backward euler
self.delta = delta # delta in integration method
self.eps_euler = eps_euler # epsilon used in backward euler
self.eps_target = eps_target # epsilon used for deciding if current state is close enough to final state
self.lambda_distance_reward = lambda_distance_reward # lambda that controls the weight for the reward
self.random = random
self.integrator = BackwardsEuler(ode_coefficients=self.coefficient,
cutoff=self.euler_limit,
delta=self.delta,
epsilon=self.eps_euler)
# define goal space / goal condition
self.goal_space = self.state_space
self.goal_condition = goal_condition
# initialize state
self.state = init_state
self.accumulate_step = 0
self.reset() # reassign the state into self.init_state
# define action space
self.action_index = action_index # a list, define action space index (e.g [1,3,4,8]), indicating which genes in [num_genes,] vector is actionable
self.action_space = len(self.action_index)
self.max_action = max_action # scalar,
# final check
self.self_assert()
# Original GAP
self.origin_gap = ((self.init_state - self.target_state) ** 2).sum()
def self_assert(self):
"""check if the init is reasonable"""
assert self.action_space < self.state_space # only selected not all of the genes can be modified
assert self.state_space == len(self.target_state) # target space is matched with the init state space
def get_reward(self, next_state, t, goal=None):
"""given next state, calculating the reward"""
# distance to target state
if goal is None:
distance_to_target = np.abs(self.target_state - next_state).sum()
else:
distance_to_target = np.abs(goal - next_state).sum()
# # calculate reward
reward = - distance_to_target * self.lambda_distance_reward
# eval if the episode ends
if distance_to_target < self.eps_target:
done = True
info = "reach the goal (error {})".format(distance_to_target)
reward += 100
elif t >= self.time_limit:
done = True
reward += -1
info = "reach the end of episode (step {}). reward {}".format(t, reward)
else:
done = False
reward += -1
info = "keep trying, error {}, reward {} step {} / {}".format(distance_to_target, reward, t, self.time_limit)
return reward, done, info, distance_to_target
def step(self, action):
"""take an action (perturb), output (next_state, reward, done, info)
param:
- action: # [self.action_space, ] should be a numpy vector
"""
# construct the resulted perturb
assert len(action) == self.action_space
perturb = self.original_perturb
# print("perturb", perturb[self.action_index].shape)
perturb[self.action_index] = action
# use backward euler for integrate
next_state = self.integrator.get_next(self.state, perturb)
# calculate next state
reward, done, info, distance_to_target = self.get_reward(next_state, self.accumulate_step)
# update the state
self.state = next_state
self.accumulate_step += 1
# goal condition
if self.goal_condition:
next_state = np.concatenate((next_state, self.target_state))
return next_state, reward, done, info
def norm(self, vec):
return np.exp(vec) / np.exp(vec).sum()
def reset(self, seed=0):
"""reset the env"""
if self.random:
np.random.seed(int(time.time()/3.243))
self.state = self.norm(np.random.random_sample(self.init_state.shape))
self.target_state = self.norm(np.random.random_sample(self.target_state.shape))
else:
self.state = self.init_state
self.accumulate_step = 0
if self.goal_condition:
return np.concatenate((self.state, self.target_state))
else:
return self.state
def sample_action(self):
"""sample a random action from"""
action = np.random.uniform(-self.max_action, self.max_action, self.action_space)
return action
    def render(self):
        """render the scene if called"""
        # Rendering is intentionally a no-op for this simulator; the hook
        # only exists to satisfy the usual gym-style environment interface.
        pass
# store the parameters for specific environment
def make(env_name, seed_network, seed_init, goal_condition=False, random_init_target=False):
    """Automatically make environment.

    Args:
        env_name: name of a predefined configuration.
        seed_network: RNG seed used to build the gene-interaction network.
        seed_init: RNG seed used to draw the initial/target states.
        goal_condition: if True, observations include the target state.
        random_init_target: if True, ``reset()`` draws new init/target states.

    Returns:
        A configured ``SimulatorEnv``.

    Raises:
        NotImplementedError: for unknown or not-yet-supported env names.
    """
    if env_name == 'random_generate':
        # tunable parameters
        num_genes = 100
        time_limit = 200
        euler_limit = 16000
        action_percent = 0.3
        delta = 1e-2
        eps_euler = 1e-4
        eps_target = 1e-2
        lambda_distance_reward = 0.1  # reward = - distance_to_target * lambda_distance_reward - 1
        max_action = 3
        # init the network structure and the actionable genes
        np.random.seed(seed_network)
        coefficient = make_symmetric_random(num_genes)  # random
        action_space = int(num_genes * action_percent)
        action_index = np.random.randint(num_genes, size=(action_space,))  # random
        # init the initial state and the target state
        np.random.seed(seed_init)
        init_state = np.random.rand(num_genes)  # random
        target_state = np.random.rand(num_genes)  # random
        original_perturb = np.zeros(num_genes)  # no baseline perturbation
    elif env_name == 'random_generate_td3_simple':
        # tunable parameters
        num_genes = 10
        time_limit = 100
        euler_limit = 16000
        action_percent = 0.4
        delta = 1e-1
        eps_euler = 1e-4
        eps_target = 1
        lambda_distance_reward = 1  # reward = - distance_to_target * lambda_distance_reward - 1
        max_action = 2
        # init the network structure and the actionable genes
        np.random.seed(seed_network)
        coefficient = make_symmetric_random(num_genes)  # random
        action_space = int(num_genes * action_percent)
        action_index = np.random.randint(num_genes, size=(action_space,))  # random
        # init the initial state and the target state
        np.random.seed(seed_init)
        init_state = np.random.rand(num_genes)  # random
        target_state = np.random.rand(num_genes)  # random
        original_perturb = np.random.rand(num_genes)  # random baseline perturbation
    elif env_name == 'random_generate_td3_simple_correctmaxact':
        # same as 'random_generate_td3_simple' but with a tighter tolerance
        num_genes = 10
        time_limit = 100
        euler_limit = 16000
        action_percent = 0.4
        delta = 1e-1
        eps_euler = 1e-4
        eps_target = 1e-2
        lambda_distance_reward = 1  # reward = - distance_to_target * lambda_distance_reward - 1
        max_action = 2
        # init the network structure and the actionable genes
        np.random.seed(seed_network)
        coefficient = make_symmetric_random(num_genes)  # random
        action_space = int(num_genes * action_percent)
        action_index = np.random.randint(num_genes, size=(action_space,))  # random
        # init the initial state and the target state
        np.random.seed(seed_init)
        init_state = np.random.rand(num_genes)  # random
        target_state = np.random.rand(num_genes)  # random
        original_perturb = np.zeros(num_genes)  # no baseline perturbation
    elif env_name == 'infer_from_data':
        # BUGFIX: this branch used to ``pass`` and then crash below with a
        # NameError on the undefined ``action_index``; fail explicitly.
        raise NotImplementedError("Env {} not implemented yet. ".format(env_name))
    else:
        raise NotImplementedError("Env {} not implemented. ".format(env_name))
    print(action_index.shape)
    print("ORIGIN GAP:", ((init_state - target_state)**2).sum())
    # define env
    env = SimulatorEnv(coefficient, init_state, target_state,
                       original_perturb, action_index, max_action, time_limit, euler_limit,
                       delta, eps_euler, eps_target, lambda_distance_reward, goal_condition, random_init_target)
    return env
def test_simulator():
    """Smoke-test the simulator by driving it with random actions."""
    # define parameters
    env = make('random_generate', seed_network=0, seed_init=1)
    train_steps = 300
    episode = 0
    for i in range(train_steps):
        # Random action; replace this with a policy network for RL.
        # BUGFIX: the original drew a (action_space, 1) column vector in
        # [0, max_action), which cannot be assigned into the flat
        # ``perturb[action_index]`` inside ``env.step`` (broadcast error)
        # and is not symmetric around zero. ``env.sample_action`` draws
        # the correct flat, symmetric action.
        action = env.sample_action()
        # put action into environment for evaluation
        start_time = time.time()
        next_state, reward, done, info = env.step(action)  # env updates its current state on every step
        distance_to_target = ((next_state - env.target_state) ** 2).sum()
        end_time = time.time()
        time_delta_step = end_time - start_time
        print("time: {:.4f} s; reward: {}; done {}; distance_to_target {}; Info: {}".format(time_delta_step, reward, done,
                                                                                            distance_to_target, info))
        if done:
            print("Episode {} End. ----------\n".format(episode))
            env.reset()
            episode += 1
if __name__ == '__main__':
    # Run the smoke test when this module is executed as a script.
    test_simulator()
| [
"numpy.random.uniform",
"numpy.random.seed",
"numpy.random.random_sample",
"numpy.abs",
"numpy.zeros",
"time.time",
"numpy.random.randint",
"numpy.exp",
"numpy.random.rand",
"numpy.concatenate"
] | [((5802, 5873), 'numpy.random.uniform', 'np.random.uniform', (['(-self.max_action)', 'self.max_action', 'self.action_space'], {}), '(-self.max_action, self.max_action, self.action_space)\n', (5819, 5873), True, 'import numpy as np\n'), ((6602, 6630), 'numpy.random.seed', 'np.random.seed', (['seed_network'], {}), '(seed_network)\n', (6616, 6630), True, 'import numpy as np\n'), ((6774, 6824), 'numpy.random.randint', 'np.random.randint', (['num_genes'], {'size': '(action_space,)'}), '(num_genes, size=(action_space,))\n', (6791, 6824), True, 'import numpy as np\n'), ((6895, 6920), 'numpy.random.seed', 'np.random.seed', (['seed_init'], {}), '(seed_init)\n', (6909, 6920), True, 'import numpy as np\n'), ((6942, 6967), 'numpy.random.rand', 'np.random.rand', (['num_genes'], {}), '(num_genes)\n', (6956, 6967), True, 'import numpy as np\n'), ((7002, 7027), 'numpy.random.rand', 'np.random.rand', (['num_genes'], {}), '(num_genes)\n', (7016, 7027), True, 'import numpy as np\n'), ((7067, 7086), 'numpy.zeros', 'np.zeros', (['num_genes'], {}), '(num_genes)\n', (7075, 7086), True, 'import numpy as np\n'), ((10054, 10065), 'time.time', 'time.time', ([], {}), '()\n', (10063, 10065), False, 'import time\n'), ((10278, 10289), 'time.time', 'time.time', ([], {}), '()\n', (10287, 10289), False, 'import time\n'), ((5007, 5054), 'numpy.concatenate', 'np.concatenate', (['(next_state, self.target_state)'], {}), '((next_state, self.target_state))\n', (5021, 5054), True, 'import numpy as np\n'), ((5142, 5153), 'numpy.exp', 'np.exp', (['vec'], {}), '(vec)\n', (5148, 5153), True, 'import numpy as np\n'), ((5621, 5668), 'numpy.concatenate', 'np.concatenate', (['(self.state, self.target_state)'], {}), '((self.state, self.target_state))\n', (5635, 5668), True, 'import numpy as np\n'), ((7544, 7572), 'numpy.random.seed', 'np.random.seed', (['seed_network'], {}), '(seed_network)\n', (7558, 7572), True, 'import numpy as np\n'), ((7716, 7766), 'numpy.random.randint', 'np.random.randint', 
(['num_genes'], {'size': '(action_space,)'}), '(num_genes, size=(action_space,))\n', (7733, 7766), True, 'import numpy as np\n'), ((7837, 7862), 'numpy.random.seed', 'np.random.seed', (['seed_init'], {}), '(seed_init)\n', (7851, 7862), True, 'import numpy as np\n'), ((7884, 7909), 'numpy.random.rand', 'np.random.rand', (['num_genes'], {}), '(num_genes)\n', (7898, 7909), True, 'import numpy as np\n'), ((7944, 7969), 'numpy.random.rand', 'np.random.rand', (['num_genes'], {}), '(num_genes)\n', (7958, 7969), True, 'import numpy as np\n'), ((8009, 8034), 'numpy.random.rand', 'np.random.rand', (['num_genes'], {}), '(num_genes)\n', (8023, 8034), True, 'import numpy as np\n'), ((9927, 9962), 'numpy.random.rand', 'np.random.rand', (['env.action_space', '(1)'], {}), '(env.action_space, 1)\n', (9941, 9962), True, 'import numpy as np\n'), ((5342, 5388), 'numpy.random.random_sample', 'np.random.random_sample', (['self.init_state.shape'], {}), '(self.init_state.shape)\n', (5365, 5388), True, 'import numpy as np\n'), ((5432, 5480), 'numpy.random.random_sample', 'np.random.random_sample', (['self.target_state.shape'], {}), '(self.target_state.shape)\n', (5455, 5480), True, 'import numpy as np\n'), ((8508, 8536), 'numpy.random.seed', 'np.random.seed', (['seed_network'], {}), '(seed_network)\n', (8522, 8536), True, 'import numpy as np\n'), ((8741, 8791), 'numpy.random.randint', 'np.random.randint', (['num_genes'], {'size': '(action_space,)'}), '(num_genes, size=(action_space,))\n', (8758, 8791), True, 'import numpy as np\n'), ((8862, 8887), 'numpy.random.seed', 'np.random.seed', (['seed_init'], {}), '(seed_init)\n', (8876, 8887), True, 'import numpy as np\n'), ((8909, 8934), 'numpy.random.rand', 'np.random.rand', (['num_genes'], {}), '(num_genes)\n', (8923, 8934), True, 'import numpy as np\n'), ((8969, 8994), 'numpy.random.rand', 'np.random.rand', (['num_genes'], {}), '(num_genes)\n', (8983, 8994), True, 'import numpy as np\n'), ((9034, 9053), 'numpy.zeros', 'np.zeros', 
(['num_genes'], {}), '(num_genes)\n', (9042, 9053), True, 'import numpy as np\n'), ((3287, 3325), 'numpy.abs', 'np.abs', (['(self.target_state - next_state)'], {}), '(self.target_state - next_state)\n', (3293, 3325), True, 'import numpy as np\n'), ((3379, 3404), 'numpy.abs', 'np.abs', (['(goal - next_state)'], {}), '(goal - next_state)\n', (3385, 3404), True, 'import numpy as np\n'), ((5156, 5167), 'numpy.exp', 'np.exp', (['vec'], {}), '(vec)\n', (5162, 5167), True, 'import numpy as np\n'), ((5287, 5298), 'time.time', 'time.time', ([], {}), '()\n', (5296, 5298), False, 'import time\n')] |
#!/usr/bin/env python
# license removed for brevity
import os
import sys
# Make modules that sit next to this file importable even when the
# script is run from another working directory.
current_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_folder)
import numpy as np
import subprocess
from subprocess import Popen, PIPE
class SYS_UTILS:
    """Small collection of shell-command and message-parsing helpers.

    Instances are callable: ``SYS_UTILS()(cmd)`` runs ``cmd`` and returns
    its captured ``(stdout, stderr)`` byte strings.
    """

    def __init__(self):
        pass

    def __call__(self, cmd):
        """Shorthand for :meth:`sys_call`."""
        return self.sys_call(cmd)

    def sys_call(self, cmd):
        """Run ``cmd`` (argument list) and return ``(stdout, stderr)`` bytes.

        BUGFIX: uses ``communicate()`` instead of reading stdout and then
        stderr sequentially, which could deadlock when the child filled the
        stderr pipe while we were still blocked reading stdout.
        """
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        stdout, stderr = p.communicate()
        return stdout, stderr

    def sys_check_output(self, cmd):
        """Run ``cmd``; on failure, print a notice and return the partial output."""
        try:
            return subprocess.check_output(cmd)
        except subprocess.CalledProcessError as excp:
            print("command error : {}".format(cmd))
            return excp.output

    def msg_line_split(self, msg):
        """Split ``msg`` on ``os.linesep`` and drop empty lines.

        Returns a numpy string array of the non-empty lines, or the empty
        list when there is none (matching the historical np.append-based
        behaviour).
        """
        # Collect the lines in one pass instead of growing a numpy array
        # with np.append, which reallocates and copies on every call.
        kept = [line for line in msg.split(os.linesep) if len(line) > 0]
        if not kept:
            return []
        return np.array(kept)

    def msg_split(self, msg, sign=' '):
        """Split ``msg`` on ``sign``, strip each token, return a numpy array."""
        tokens = [val.strip() for val in msg.split(sign)]
        return np.array(tokens)
'''
sys = SYS_UTILS()
#cmd = ["airodump-ng", "mon0"]
cmd = ["ifconfig"]
#a = sys(["ifconfig"])
#a = sys(cmd)
a, b = sys.sys_call(cmd)
#a = sys.sys_check_output(cmd)
a = sys.msg_line_split(a)
print(a)
#print(b)
'''
| [
"sys.path.append",
"subprocess.Popen",
"subprocess.check_output",
"os.path.realpath",
"numpy.append"
] | [((139, 170), 'sys.path.append', 'sys.path.append', (['current_folder'], {}), '(current_folder)\n', (154, 170), False, 'import sys\n'), ((110, 136), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (126, 136), False, 'import os\n'), ((468, 516), 'subprocess.Popen', 'Popen', (['cmd'], {'stdout': 'PIPE', 'stderr': 'PIPE', 'stdin': 'PIPE'}), '(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n', (473, 516), False, 'from subprocess import Popen, PIPE\n'), ((733, 761), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {}), '(cmd)\n', (756, 761), False, 'import subprocess\n'), ((1110, 1139), 'numpy.append', 'np.append', (['line_new', 'lines[i]'], {}), '(line_new, lines[i])\n', (1119, 1139), True, 'import numpy as np\n')] |
# Copyright (C) 2019 ByteDance Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import argparse
import modeling
import numpy as np
import os
import tensorflow as tf
import effective_transformer
# disable tensorflow debugging information
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# Run the TF1-style graph/session code below under TensorFlow 2.
tf.compat.v1.disable_eager_execution()
def main(args):
    """Compare standard (XLA) BERT inference with effective_transformer.

    Builds both graphs over the same random weights and fake inputs,
    verifies that their outputs agree on all valid (non-padded) word
    positions, then reports the average latency of each.
    """
    bert_config = modeling.BertConfig.from_json_file(args.config)
    # Benchmarking only: disable dropout so both graphs are deterministic.
    bert_config.hidden_dropout_prob = 0.0
    bert_config.attention_probs_dropout_prob = 0.0
    batch_size = args.batch_size
    avg_seq_len = args.avg_seq_length
    max_seq_len = args.max_seq_length
    tf_dtype = tf.float16 if args.precision == 'fp16' else tf.float32
    if args.precision == 'fp16':
        policy = tf.keras.mixed_precision.Policy('mixed_float16')
        tf.keras.mixed_precision.set_global_policy(policy)
    # Fake per-sample sequence lengths, chosen so the mean is avg_seq_len.
    input_len = np.random.randint(
        low=2 * avg_seq_len - max_seq_len, high=max_seq_len + 1,
        size=(batch_size,), dtype=np.int32)
    # Computed once here; the original recomputed it before printing and
    # also generated fake input ids that were never consumed.
    valid_word_num = sum(input_len)
    # Fake input mask: 1 for valid tokens, 0 for padding.
    input_mask = np.zeros((batch_size, max_seq_len), dtype=np.int32)
    for b_idx, s_len in enumerate(input_len):
        input_mask[b_idx][:s_len] = 1
    input_mask_tensor = tf.convert_to_tensor(input_mask, dtype=tf.int32)
    # Fake embedding output fed directly to the transformer stack (the
    # embedding lookup itself is not part of the benchmark).
    embed_output = np.random.randn(batch_size, max_seq_len, bert_config.hidden_size)
    input_tensor = tf.convert_to_tensor(embed_output, dtype=tf_dtype)
    # keep attention_mask for compatible reason
    att_mask = np.tile(input_mask, max_seq_len)
    att_mask = att_mask.reshape(batch_size, max_seq_len, max_seq_len)
    attention_mask = tf.convert_to_tensor(att_mask, dtype=tf_dtype)
    # input info
    print("Valid word num : {}/{}, avg sequence length : {:.6} ".format(
        valid_word_num, batch_size * max_seq_len, valid_word_num / batch_size))
    # bert with standard transformer
    std_bert = modeling.transformer_model(
        input_tensor=input_tensor,
        attention_mask=attention_mask,
        hidden_size=bert_config.hidden_size,
        num_hidden_layers=bert_config.num_hidden_layers,
        num_attention_heads=bert_config.num_attention_heads,
        intermediate_size=bert_config.intermediate_size,
        intermediate_act_fn=modeling.get_activation(bert_config.hidden_act),
        hidden_dropout_prob=bert_config.hidden_dropout_prob,
        attention_probs_dropout_prob=bert_config.attention_probs_dropout_prob,
        initializer_range=bert_config.initializer_range,
        do_return_all_layers=False)
    config = tf.compat.v1.ConfigProto()
    config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
    with tf.compat.v1.Session(config=config) as sess:
        # init weights
        sess.run(tf.compat.v1.global_variables_initializer())
        # Collect the transformer weights so effective_transformer reuses
        # exactly the same parameters as the standard graph.
        all_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
        transformer_vars = [tf.cast(v, dtype=tf_dtype) for v in all_vars if v.name.startswith('layer')]
        weights_value = sess.run(transformer_vars)
        # bert with effective transformer
        et_bert = effective_transformer.get_sequence_output(
            max_batch_size=batch_size,
            max_seq_length=max_seq_len,
            config=bert_config,
            attention_mask=attention_mask,
            input_mask=input_mask_tensor,
            from_tensor=input_tensor,
            weights_value=weights_value,
        )
        # Per-word max abs difference over the valid positions only
        # (padded positions are allowed to differ between the two graphs).
        # Use the configured hidden size instead of a hard-coded 768.
        val1 = sess.run(std_bert).reshape(-1, bert_config.hidden_size)
        val2 = sess.run(et_bert).reshape(-1, bert_config.hidden_size)
        diff = []
        for b_idx, s_len in enumerate(input_len):
            for w_idx in range(s_len):
                idx = b_idx * args.max_seq_length + w_idx
                diff.append(np.fabs(val1[idx] - val2[idx]).max())
        print("max diff : {:.6}, avg diff : {:.6}.".format(max(diff), sum(diff) / len(diff)))

        def time_inference(output_tensor):
            """Return the mean latency (ms) of sess.run(output_tensor)."""
            iter_num = 128
            # warm up
            for i in range(10):
                sess.run(output_tensor)
            beg = datetime.now()
            for i in range(iter_num):
                sess.run(output_tensor)
            end = datetime.now()
            return (end - beg).total_seconds() * 1000 / iter_num  # ms

        # BUGFIX: the timing helper and these prints must run while the
        # session is still open; in the original they were dedented outside
        # the ``with`` block, where ``sess`` was already closed.
        print("xla cost : {:.6} ms".format(time_inference(std_bert)))
        print("et  cost : {:.6} ms".format(time_inference(et_bert)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Bert performance measuring sample.')
parser.add_argument(
'-c', '--config', type = str, default = 'bert_config.json', help = 'Bert config file.')
parser.add_argument(
'-p', '--precision', type = str, default = 'fp16', choices=['fp32', 'fp16'], help = 'Weight precision.')
parser.add_argument(
'-b', '--batch_size', type = int, default = 128, help = 'Batch size.')
parser.add_argument(
'-m', '--max_seq_length', type = int, default = 32, help = 'Max sequence length.')
parser.add_argument(
'-a', '--avg_seq_length', type = int, default = 20, help = 'Average sequence length.')
args = parser.parse_args()
main(args)
| [
"argparse.ArgumentParser",
"effective_transformer.get_sequence_output",
"tensorflow.compat.v1.disable_eager_execution",
"numpy.random.randint",
"modeling.BertConfig.from_json_file",
"numpy.tile",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.random.randn",
"tensorflow.keras.mixed_preci... | [((807, 869), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (841, 869), True, 'import tensorflow as tf\n'), ((871, 909), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (907, 909), True, 'import tensorflow as tf\n'), ((943, 990), 'modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['args.config'], {}), '(args.config)\n', (977, 990), False, 'import modeling\n'), ((1444, 1555), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(2 * avg_seq_len - max_seq_len)', 'high': '(max_seq_len + 1)', 'size': 'batch_size', 'dtype': 'np.int32'}), '(low=2 * avg_seq_len - max_seq_len, high=max_seq_len + 1,\n size=batch_size, dtype=np.int32)\n', (1461, 1555), True, 'import numpy as np\n'), ((1649, 1754), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'bert_config.vocab_size', 'size': '(batch_size, max_seq_len)', 'dtype': 'np.int32'}), '(low=0, high=bert_config.vocab_size, size=(batch_size,\n max_seq_len), dtype=np.int32)\n', (1666, 1754), True, 'import numpy as np\n'), ((1784, 1835), 'numpy.zeros', 'np.zeros', (['(batch_size, max_seq_len)'], {'dtype': 'np.int32'}), '((batch_size, max_seq_len), dtype=np.int32)\n', (1792, 1835), True, 'import numpy as np\n'), ((1940, 1987), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_ids'], {'dtype': 'tf.int32'}), '(input_ids, dtype=tf.int32)\n', (1960, 1987), True, 'import tensorflow as tf\n'), ((2013, 2061), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_mask'], {'dtype': 'tf.int32'}), '(input_mask, dtype=tf.int32)\n', (2033, 2061), True, 'import tensorflow as tf\n'), ((2109, 2174), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'max_seq_len', 'bert_config.hidden_size'], {}), '(batch_size, max_seq_len, bert_config.hidden_size)\n', (2124, 2174), 
True, 'import numpy as np\n'), ((2192, 2242), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['embed_output'], {'dtype': 'tf_dtype'}), '(embed_output, dtype=tf_dtype)\n', (2212, 2242), True, 'import tensorflow as tf\n'), ((2305, 2337), 'numpy.tile', 'np.tile', (['input_mask', 'max_seq_len'], {}), '(input_mask, max_seq_len)\n', (2312, 2337), True, 'import numpy as np\n'), ((2425, 2471), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['att_mask'], {'dtype': 'tf_dtype'}), '(att_mask, dtype=tf_dtype)\n', (2445, 2471), True, 'import tensorflow as tf\n'), ((3457, 3483), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (3481, 3483), True, 'import tensorflow as tf\n'), ((5234, 5307), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Bert performance measuring sample."""'}), "(description='Bert performance measuring sample.')\n", (5257, 5307), False, 'import argparse\n'), ((1297, 1345), 'tensorflow.keras.mixed_precision.Policy', 'tf.keras.mixed_precision.Policy', (['"""mixed_float16"""'], {}), "('mixed_float16')\n", (1328, 1345), True, 'import tensorflow as tf\n'), ((1350, 1400), 'tensorflow.keras.mixed_precision.set_global_policy', 'tf.keras.mixed_precision.set_global_policy', (['policy'], {}), '(policy)\n', (1392, 1400), True, 'import tensorflow as tf\n'), ((3586, 3621), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (3606, 3621), True, 'import tensorflow as tf\n'), ((3754, 3825), 'tensorflow.compat.v1.get_collection', 'tf.compat.v1.get_collection', (['tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES'], {}), '(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)\n', (3781, 3825), True, 'import tensorflow as tf\n'), ((4026, 4271), 'effective_transformer.get_sequence_output', 'effective_transformer.get_sequence_output', ([], {'max_batch_size': 'batch_size', 'max_seq_length': 'max_seq_len', 'config': 'bert_config', 'attention_mask': 'attention_mask', 
'input_mask': 'input_mask_tensor', 'from_tensor': 'input_tensor', 'weights_value': 'weights_value'}), '(max_batch_size=batch_size,\n max_seq_length=max_seq_len, config=bert_config, attention_mask=\n attention_mask, input_mask=input_mask_tensor, from_tensor=input_tensor,\n weights_value=weights_value)\n', (4067, 4271), False, 'import effective_transformer\n'), ((3143, 3190), 'modeling.get_activation', 'modeling.get_activation', (['bert_config.hidden_act'], {}), '(bert_config.hidden_act)\n', (3166, 3190), False, 'import modeling\n'), ((3663, 3706), 'tensorflow.compat.v1.global_variables_initializer', 'tf.compat.v1.global_variables_initializer', ([], {}), '()\n', (3704, 3706), True, 'import tensorflow as tf\n'), ((3850, 3876), 'tensorflow.cast', 'tf.cast', (['v'], {'dtype': 'tf_dtype'}), '(v, dtype=tf_dtype)\n', (3857, 3876), True, 'import tensorflow as tf\n'), ((4893, 4907), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4905, 4907), False, 'from datetime import datetime\n'), ((4984, 4998), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4996, 4998), False, 'from datetime import datetime\n'), ((4608, 4638), 'numpy.fabs', 'np.fabs', (['(val1[idx] - val2[idx])'], {}), '(val1[idx] - val2[idx])\n', (4615, 4638), True, 'import numpy as np\n')] |
# Benchmark script: run a stereo-disparity TFLite model on CPU and report
# the average inference latency.
import warnings
import os
# Silence TensorFlow C++ logging and Python deprecation warnings before
# TensorFlow is imported.
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)
import tensorflow as tf
import numpy as np
from pprint import pprint
import time
import platform
# Benchmark configuration: input resolution and interpreter thread count.
H=256
W=256
THREADS=4
# Select exactly one (MODEL, CHANNEL) pair; the alternatives are kept
# commented out for convenience.
# MODEL='flyingthings_finalpass_xl'
# CHANNEL=6
MODEL='eth3d'
CHANNEL=2
# MODEL='middlebury_d400'
# CHANNEL=6
# Load the float32 TFLite model exported for the selected resolution.
interpreter = tf.lite.Interpreter(f'{MODEL}/saved_model_{H}x{W}/model_float32.tflite', num_threads=THREADS)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]['shape']
input_height = input_shape[1]
input_width = input_shape[2]
channels = input_shape[3]
# NOTE(review): the input is built with the hard-coded CHANNEL, not the
# ``channels`` value reported by the model itself — confirm they agree
# for the selected MODEL.
size = (1, input_height, input_width, CHANNEL)
input_tensor = np.ones(size, dtype=np.float32)
# Run ``roop_count`` inferences on the same all-ones input and average
# the wall-clock time per inference.
start = time.perf_counter()
roop_count = 10
reference_output_disparity = None
for i in range(roop_count):
    interpreter.set_tensor(input_details[0]['index'], input_tensor)
    interpreter.invoke()
    reference_output_disparity = interpreter.get_tensor(output_details[0]['index'])
inference_time = (time.perf_counter() - start) / roop_count
# pprint(reference_output_disparity)
print(f'Model: {MODEL}')
print(f'Input resolution: {H}x{W}')
print(f'Number of Threads: {THREADS}')
print(f'Platform: {platform.platform()}')
print(f'Average of {roop_count} times inference: {(inference_time * 1000):.1f}ms')
# Example output from a previous run:
"""
$ python3 test.py
INFO: Created TensorFlow Lite XNNPACK delegate for CPU.
INFO: Created TensorFlow Lite delegate for select TF ops.
INFO: TfLiteFlexDelegate delegate: 20 nodes delegated out of 772 nodes with 10 partitions.
Model: eth3d
Input resolution: 256x256
Number of Threads: 4
Platform: Linux-5.11.0-27-generic-x86_64-with-glibc2.29
Average of 10 times inference: 360.6ms
"""
"warnings.simplefilter",
"numpy.ones",
"time.perf_counter",
"platform.platform",
"tensorflow.lite.Interpreter"
] | [((65, 127), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (86, 127), False, 'import warnings\n'), ((128, 184), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'Warning'}), "(action='ignore', category=Warning)\n", (149, 184), False, 'import warnings\n'), ((432, 529), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', (['f"""{MODEL}/saved_model_{H}x{W}/model_float32.tflite"""'], {'num_threads': 'THREADS'}), "(f'{MODEL}/saved_model_{H}x{W}/model_float32.tflite',\n num_threads=THREADS)\n", (451, 529), True, 'import tensorflow as tf\n'), ((845, 876), 'numpy.ones', 'np.ones', (['size'], {'dtype': 'np.float32'}), '(size, dtype=np.float32)\n', (852, 876), True, 'import numpy as np\n'), ((886, 905), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (903, 905), False, 'import time\n'), ((1180, 1199), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1197, 1199), False, 'import time\n'), ((1378, 1397), 'platform.platform', 'platform.platform', ([], {}), '()\n', (1395, 1397), False, 'import platform\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from gym import spaces
import neurogym as ngym
import matplotlib.pyplot as plt
class CVLearning(ngym.TrialEnv):
r"""Implements shaping for the delay-response task, in which agents
have to integrate two stimuli and report which one is larger on
average after a delay.
Args:
stim_scale: Controls the difficulty of the experiment. (def: 1., float)
max_num_reps: Maximum number of times that agent can go in a row
to the same side during phase 0. (def: 3, int)
th_stage: Performance threshold needed to proceed to the following
phase. (def: 0.7, float)
keep_days: Number of days that the agent will be kept in the same phase
once arrived to the goal performacance. (def: 1, int)
trials_day: Number of trials performed during one day. (def: 200, int)
perf_len: Number of trials used to compute instantaneous performance.
(def: 20, int)
stages: Stages used to train the agent. (def: [0, 1, 2, 3, 4], list)
"""
metadata = {
'paper_link': 'https://www.nature.com/articles/s41586-019-0919-7',
'paper_name': 'Discrete attractor dynamics underlies persistent' +
' activity in the frontal cortex',
'tags': ['perceptual', 'delayed response', 'two-alternative',
'supervised']
}
def __init__(self, dt=100, rewards=None, timing=None, stim_scale=1.,
sigma=1.0, max_num_reps=3, th_stage=0.7, keep_days=1,
trials_day=300, perf_len=20, stages=[0, 1, 2, 3, 4], n_ch=10):
super().__init__(dt=dt)
self.choices = [1, 2]
self.n_ch = n_ch # number of obs and actions different from fixation
# cohs specifies the amount of evidence
# (which is modulated by stim_scale)
self.cohs = np.array([0, 6.4, 12.8, 25.6, 51.2])*stim_scale
self.sigma = sigma / np.sqrt(self.dt) # Input noise
# Rewards
self.rewards = {'abort': -0.1, 'correct': +1., 'fail': -1.}
if rewards:
self.rewards.update(rewards)
self.delay_durs = [1000, 3000]
self.timing = {
'fixation': 200,
'stimulus': 1150,
'delay': lambda: self.rng.uniform(*self.delay_durs),
'decision': 1500}
if timing:
self.timing.update(timing)
self.stages = stages
self.r_fail = self.rewards['fail']
self.action = 0
self.abort = False
self.firstcounts = True # whether trial ends at first attempt
self.first_flag = False # whether first attempt has been done
self.ind = 0 # index of the current stage
if th_stage == -1:
self.curr_ph = self.stages[-1]
else:
self.curr_ph = self.stages[self.ind]
self.rew = 0
# PERFORMANCE VARIABLES
self.trials_counter = 0
# Day/session performance
self.curr_perf = 0
self.trials_day = trials_day
self.th_perf = th_stage
self.day_perf = np.empty(trials_day)
self.w_keep = [keep_days]*len(self.stages) # TODO: simplify??
# number of days to keep an agent on a stage
# once it has reached th_perf
self.days_keep = self.w_keep[self.ind]
self.keep_stage = False # wether the agent can move to the next stage
# Instantaneous performance (moving window)
self.inst_perf = 0
self.perf_len = perf_len # window length
self.mov_perf = np.zeros(perf_len)
# STAGE VARIABLES
# stage 0
# max number of consecutive times that an agent can repeat an action
# receiving positive reward on stage 0
self.max_num_reps = max_num_reps
# counter of consecutive actions at the same side
self.action_counter = 0
# stage 2
# min performance to keep the agent in stage 2
self.min_perf = 0.5 # TODO: no magic numbers
self.stage_reminder = False # control if a stage has been explored
# stage 3
self.inc_delays = 0 # proportion of the total delays dur to keep
self.delay_milestone = 0 # delays durs at the beggining of a day
# proportion of the total delays dur to incease every time that the
# agent reaches a threshold performance
self.inc_factor = 0.25
self.inc_delays_th = th_stage # th perf to increase delays in stage 3
self.dec_delays_th = 0.5 # th perf to decrease delays in stage 3
# number of trials spent on a specific delays duration
self.trials_delay = 0
self.max_delays = True # wheter delays have reached their max dur
self.dur = [0]*len(self.delay_durs)
# action and observation spaces
self.action_space = spaces.Discrete(n_ch+1)
self.observation_space = spaces.Box(-np.inf, np.inf, shape=(n_ch+1,),
dtype=np.float32)
    def _new_trial(self, **kwargs):
        """Generate the parameters of the next trial.

        Called when a trial ends. Updates the curriculum stage via
        ``set_phase`` and then builds a trial dict with:
            ground_truth: correct response for the trial.
            coh: stimulus coherence (evidence) for the trial.
            sigma: observation noise level.
        Period durations for 'stimulus' and 'delay' are adjusted per
        curriculum stage (``self.curr_ph``) before the periods and the
        observation tensor are set up.

        Returns:
            trial (dict): the trial parameters actually used.
        """
        # Update curriculum stage from recent performance before sampling.
        self.set_phase()
        if self.curr_ph == 0:
            # control that agent does not repeat side more than 3 times
            self.count(self.action)
        trial = {
            'ground_truth': self.rng.choice(self.choices),
            'coh': self.rng.choice(self.cohs),
            'sigma': self.sigma,
        }
        # init durations with None
        self.durs = {key: None for key in self.timing}
        self.firstcounts = True
        self.first_choice_rew = None
        if self.curr_ph == 0:
            # no stim, reward is in both left and right
            # agent cannot go N times in a row to the same side
            if np.abs(self.action_counter) >= self.max_num_reps:
                # force the other side to be correct and drop the reward
                # on the repeated side
                ground_truth = 1 if self.action == 2 else 2
                trial.update({'ground_truth': ground_truth})
                self.rewards['fail'] = 0
            else:
                # both sides rewarded equally while no repetition streak
                self.rewards['fail'] = self.rewards['correct']
            self.durs.update({'stimulus': 0,
                             'delay': 0})
            trial.update({'sigma': 0})
        elif self.curr_ph == 1:
            # stim introduced with no ambiguity
            # wrong answer is not penalized
            # agent can keep exploring until finding the right answer
            self.durs.update({'delay': 0})
            trial.update({'coh': 100})
            trial.update({'sigma': 0})
            self.rewards['fail'] = 0
            self.firstcounts = False
        elif self.curr_ph == 2:
            # first answer counts
            # wrong answer is penalized
            self.durs.update({'delay': (0)})
            trial.update({'coh': 100})
            trial.update({'sigma': 0})
            self.rewards['fail'] = self.r_fail
        elif self.curr_ph == 3:
            self.rewards['fail'] = self.r_fail
            # increasing or decreasing delays durs once enough trials have
            # been spent at the current duration
            if self.trials_delay > self.perf_len:
                if self.inst_perf >= self.inc_delays_th and\
                   self.inc_delays < 1:
                    self.inc_delays += self.inc_factor
                    self.trials_delay = 0
                elif (self.inst_perf <= self.dec_delays_th and
                      self.inc_delays > self.delay_milestone):
                    self.inc_delays -= self.inc_factor
                    self.trials_delay = 0
            # scale the nominal delay durations by the current fraction
            self.dur = [int(d*self.inc_delays) for d in self.delay_durs]
            if self.dur == self.delay_durs:
                self.max_delays = True
            else:
                self.max_delays = False
            self.durs.update({'delay': np.random.choice(self.dur)})
            # delay component is introduced
            trial.update({'coh': 100})
            trial.update({'sigma': 0})
        # phase 4: ambiguity component is introduced
        self.first_flag = False
        # ---------------------------------------------------------------------
        # Trial
        # ---------------------------------------------------------------------
        trial.update(kwargs)
        # ---------------------------------------------------------------------
        # Periods
        # ---------------------------------------------------------------------
        self.add_period('fixation')
        self.add_period('stimulus', duration=self.durs['stimulus'],
                        after='fixation')
        self.add_period('delay', duration=self.durs['delay'],
                        after='stimulus')
        self.add_period('decision', after='delay')
        # define observations: channel 0 is the fixation cue, channels 1..n_ch
        # carry the choice evidence
        self.set_ob([1]+[0]*self.n_ch, 'fixation')
        stim = self.view_ob('stimulus')
        stim[:, 0] = 1
        stim[:, 1:3] = (1 - trial['coh']/100)/2
        stim[:, trial['ground_truth']] = (1 + trial['coh']/100)/2
        stim[:, 3:] = 0.5
        stim[:, 1:] +=\
            self.rng.randn(stim.shape[0], self.n_ch) * trial['sigma']
        self.set_ob([1]+[0]*self.n_ch, 'delay')
        self.set_groundtruth(trial['ground_truth'], 'decision')
        return trial
def count(self, action):
'''
check the last three answers during stage 0 so the network has to
alternate between left and right
'''
if action != 0:
new = action - 2/action
if np.sign(self.action_counter) == np.sign(new):
self.action_counter += new
else:
self.action_counter = new
    def set_phase(self):
        """Update performance trackers and move between curriculum stages.

        Records whether the last trial was rewarded into the per-day and
        moving-window performance buffers, then (a) demotes/promotes between
        stages 1 and 2 based on instantaneous performance and (b) at the end
        of each simulated day decides whether to keep or advance the stage.
        """
        # 1 if the last trial earned the 'correct' reward, else 0
        self.day_perf[self.trials_counter] =\
            1*(self.rew == self.rewards['correct'])
        self.mov_perf[self.trials_counter % self.perf_len] =\
            1*(self.rew == self.rewards['correct'])
        self.trials_counter += 1
        self.trials_delay += 1
        # Instantaneous performance (moving window of perf_len trials)
        if self.trials_counter > self.perf_len:
            self.inst_perf = np.mean(self.mov_perf)
            if self.inst_perf < self.min_perf and self.curr_ph == 2:
                # struggling in stage 2: temporarily fall back to stage 1
                if 1 in self.stages:
                    self.curr_ph = 1
                    self.stage_reminder = True
                    self.ind -= 1
            elif self.inst_perf > self.th_perf and self.stage_reminder:
                # recovered: return to stage 2
                self.curr_ph = 2
                self.ind += 1
                self.stage_reminder = False
        # End of the day
        if self.trials_counter >= self.trials_day:
            self.trials_counter = 0
            self.curr_perf = np.mean(self.day_perf)
            self.day_perf = np.empty(self.trials_day)
            self.delay_milestone = self.inc_delays
            # Keeping or changing stage: advance only when day performance
            # reaches threshold AND delays are at their maximum duration
            if self.curr_perf >= self.th_perf and self.max_delays:
                self.keep_stage = True
            else:
                self.keep_stage = False
                self.days_keep = self.w_keep[self.ind]
            if self.keep_stage:
                if self.days_keep <= 0 and\
                   self.curr_ph < self.stages[-1]:
                    # enough qualifying days accumulated: move to next stage
                    self.ind += 1
                    self.curr_ph = self.stages[self.ind]
                    self.days_keep = self.w_keep[self.ind] + 1
                    self.keep_stage = False
                self.days_keep -= 1
    def _step(self, action):
        """Advance the environment by one step.

        Computes the reward for ``action`` given the current period,
        tracks the first decision of the trial (used by stage 1, where
        only the first choice's reward is what counts), and assembles
        the info dict exposed to the training loop.

        Returns:
            tuple: (observation, reward, done=False, info dict).
        """
        # obs, reward, done, info = self.env._step(action)
        # ---------------------------------------------------------------------
        new_trial = False
        # rewards
        reward = 0
        gt = self.gt_now
        first_choice = False
        if action != 0 and not self.in_period('decision'):
            # responded outside the decision period: abort
            new_trial = self.abort
            reward = self.rewards['abort']
        elif self.in_period('decision'):
            if action == gt:
                reward = self.rewards['correct']
                new_trial = True
                if not self.first_flag:
                    first_choice = True
                    self.first_flag = True
                    self.performance = 1
            elif action == 3 - gt:  # 3-action is the other act
                reward = self.rewards['fail']
                # in stage 1 firstcounts is False, so wrong answers do not
                # end the trial and the agent may keep exploring
                new_trial = self.firstcounts
                if not self.first_flag:
                    first_choice = True
                    self.first_flag = True
                    self.performance =\
                        self.rewards['fail'] == self.rewards['correct']
        # check if first choice (phase 1)
        if not self.firstcounts and first_choice:
            self.first_choice_rew = reward
        # set reward for all phases
        self.rew = self.first_choice_rew or reward
        if new_trial and self.curr_ph == 0:
            # remember the chosen side so count() can detect repetitions
            self.action = action
        info = {'new_trial': new_trial, 'gt': gt, 'num_tr': self.num_tr,
                'curr_ph': self.curr_ph, 'first_rew': self.rew,
                'keep_stage': self.keep_stage, 'inst_perf': self.inst_perf,
                'trials_day': self.trials_counter, 'durs': self.dur,
                'inc_delays': self.inc_delays, 'curr_perf': self.curr_perf,
                'trials_count': self.trials_counter, 'th_perf': self.th_perf,
                'num_stps': self.t_ind}
        return self.ob_now, reward, False, info
if __name__ == '__main__':
plt.close('all')
env = CVLearning(stages=[0, 2, 3, 4], trials_day=2, keep_days=1)
data = ngym.utils.plot_env(env, num_steps=200)
env = CVLearning(stages=[3, 4], trials_day=2, keep_days=1)
data = ngym.utils.plot_env(env, num_steps=200) | [
"numpy.random.choice",
"numpy.abs",
"matplotlib.pyplot.close",
"numpy.empty",
"numpy.zeros",
"neurogym.utils.plot_env",
"gym.spaces.Discrete",
"numpy.mean",
"numpy.array",
"gym.spaces.Box",
"numpy.sign",
"numpy.sqrt"
] | [((13584, 13600), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (13593, 13600), True, 'import matplotlib.pyplot as plt\n'), ((13681, 13720), 'neurogym.utils.plot_env', 'ngym.utils.plot_env', (['env'], {'num_steps': '(200)'}), '(env, num_steps=200)\n', (13700, 13720), True, 'import neurogym as ngym\n'), ((13795, 13834), 'neurogym.utils.plot_env', 'ngym.utils.plot_env', (['env'], {'num_steps': '(200)'}), '(env, num_steps=200)\n', (13814, 13834), True, 'import neurogym as ngym\n'), ((3102, 3122), 'numpy.empty', 'np.empty', (['trials_day'], {}), '(trials_day)\n', (3110, 3122), True, 'import numpy as np\n'), ((3564, 3582), 'numpy.zeros', 'np.zeros', (['perf_len'], {}), '(perf_len)\n', (3572, 3582), True, 'import numpy as np\n'), ((4841, 4866), 'gym.spaces.Discrete', 'spaces.Discrete', (['(n_ch + 1)'], {}), '(n_ch + 1)\n', (4856, 4866), False, 'from gym import spaces\n'), ((4898, 4962), 'gym.spaces.Box', 'spaces.Box', (['(-np.inf)', 'np.inf'], {'shape': '(n_ch + 1,)', 'dtype': 'np.float32'}), '(-np.inf, np.inf, shape=(n_ch + 1,), dtype=np.float32)\n', (4908, 4962), False, 'from gym import spaces\n'), ((1877, 1913), 'numpy.array', 'np.array', (['[0, 6.4, 12.8, 25.6, 51.2]'], {}), '([0, 6.4, 12.8, 25.6, 51.2])\n', (1885, 1913), True, 'import numpy as np\n'), ((1954, 1970), 'numpy.sqrt', 'np.sqrt', (['self.dt'], {}), '(self.dt)\n', (1961, 1970), True, 'import numpy as np\n'), ((10278, 10300), 'numpy.mean', 'np.mean', (['self.mov_perf'], {}), '(self.mov_perf)\n', (10285, 10300), True, 'import numpy as np\n'), ((10846, 10868), 'numpy.mean', 'np.mean', (['self.day_perf'], {}), '(self.day_perf)\n', (10853, 10868), True, 'import numpy as np\n'), ((10897, 10922), 'numpy.empty', 'np.empty', (['self.trials_day'], {}), '(self.trials_day)\n', (10905, 10922), True, 'import numpy as np\n'), ((6063, 6090), 'numpy.abs', 'np.abs', (['self.action_counter'], {}), '(self.action_counter)\n', (6069, 6090), True, 'import numpy as np\n'), ((9684, 9712), 
'numpy.sign', 'np.sign', (['self.action_counter'], {}), '(self.action_counter)\n', (9691, 9712), True, 'import numpy as np\n'), ((9716, 9728), 'numpy.sign', 'np.sign', (['new'], {}), '(new)\n', (9723, 9728), True, 'import numpy as np\n'), ((8002, 8028), 'numpy.random.choice', 'np.random.choice', (['self.dur'], {}), '(self.dur)\n', (8018, 8028), True, 'import numpy as np\n')] |
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import sys
import collections
import warnings
import numpy as np
# If numba is installed, import jit. Otherwise, define an empty decorator with
# the same name.
try:
    from numba import jit
except ImportError:
    # numba is an optional dependency: fall back to an identity decorator
    # so @jit-decorated functions still run, just uncompiled. Catching only
    # ImportError (instead of a bare except) avoids hiding unrelated errors
    # raised while numba itself imports.
    def jit(fun):
        """No-op replacement for numba.jit when numba is unavailable."""
        return fun
def simon(message, **kwargs):
    """The Statistical Interpretation MONitor.
    A warning system designed to always remind the user that Simon
    is watching him/her.
    Parameters
    ----------
    message : string
        The message that is thrown
    kwargs : dict
        The rest of the arguments that are passed to warnings.warn
    """
    text = "SIMON says: {0}".format(message)
    warnings.warn(text, **kwargs)
def rebin_data(x, y, dx_new, method='sum'):
    """Rebin some data to an arbitrary new data resolution. Either sum
    the data points in the new bins or average them.
    Parameters
    ----------
    x: iterable
        The dependent variable with some resolution dx_old = x[1]-x[0]
    y: iterable
        The independent variable to be binned
    dx_new: float
        The new resolution of the dependent variable x
    method: {"sum" | "average" | "mean"}, optional, default "sum"
        The method to be used in binning. Either sum the samples y in
        each new bin of x, or take the arithmetic mean.
    Returns
    -------
    xbin: numpy.ndarray
        The midpoints of the new bins in x
    ybin: numpy.ndarray
        The binned quantity y
    step_size: float
        The new bin width in units of the old bins (dx_new / dx_old)
    Raises
    ------
    ValueError
        If dx_new is smaller than the old resolution, or if method is
        not one of the recognized keywords.
    """
    y = np.asarray(y)
    dx_old = x[1] - x[0]
    if dx_new < dx_old:
        raise ValueError("New frequency resolution must be larger than "
                         "old frequency resolution.")
    step_size = dx_new / dx_old
    output = []
    for i in np.arange(0, y.shape[0], step_size):
        total = 0
        # Fractional contribution of the (possibly partial) first old bin
        int_i = int(i)
        prev_frac = int_i + 1 - i
        prev_bin = int_i
        total += prev_frac * y[prev_bin]
        if i + step_size < len(x):
            # Fractional part of next bin:
            next_frac = i + step_size - int(i + step_size)
            next_bin = int(i + step_size)
            total += next_frac * y[next_bin]
        # Whole old bins fully inside the new bin
        total += sum(y[int(i+1):int(i+step_size)])
        output.append(total)
    output = np.asarray(output)
    if method in ['mean', 'avg', 'average', 'arithmetic mean']:
        # FIX: the deprecated np.float alias was removed in NumPy 1.24;
        # the builtin float is the drop-in replacement.
        ybin = output / float(step_size)
    elif method == "sum":
        ybin = output
    else:
        raise ValueError("Method for summing or averaging not recognized. "
                         "Please enter either 'sum' or 'mean'.")
    tseg = x[-1] - x[0] + dx_old
    if (tseg / dx_new % 1) > 0:
        # drop the trailing partial bin when dx_new does not evenly
        # divide the total span
        ybin = ybin[:-1]
    new_x0 = (x[0] - (0.5*dx_old)) + (0.5*dx_new)
    xbin = np.arange(ybin.shape[0]) * dx_new + new_x0
    return xbin, ybin, step_size
def assign_value_if_none(value, default):
    """Return ``value`` unless it is None, in which case return ``default``."""
    if value is None:
        return default
    return value
def look_for_array_in_array(array1, array2):
    """Return the first element of array1 also present in array2, or None."""
    for item in array1:
        if item in array2:
            return item
    return None
def is_string(s):  # pragma : no cover
    """Portable function to answer whether ``s`` is a string.

    On Python 2 this checks against ``basestring`` (covering str and
    unicode); on Python 3 it checks against ``str``.
    """
    if sys.version_info[0] == 2:
        return isinstance(s, basestring)  # NOQA
    return isinstance(s, str)  # NOQA
def is_iterable(stuff):
    """Test if stuff is an iterable.

    Uses ``collections.abc.Iterable`` (Python 3.3+). The bare
    ``collections.Iterable`` alias was removed in Python 3.10, which made
    the original implementation raise AttributeError there; the fallback
    keeps Python 2 working, where ``collections.abc`` does not exist.
    """
    try:
        iterable_cls = collections.abc.Iterable
    except AttributeError:  # pragma: no cover - Python 2 only
        iterable_cls = collections.Iterable
    return isinstance(stuff, iterable_cls)
def order_list_of_arrays(data, order):
    """Reorder every array in ``data`` with the index array ``order``.

    Accepts a mapping (each value is reindexed), any other iterable of
    arrays (each element is reindexed into a list), or anything else
    (returns None).
    """
    if hasattr(data, 'items'):
        return {key: value[order] for key, value in data.items()}
    if is_iterable(data):
        return [element[order] for element in data]
    return None
def optimal_bin_time(fftlen, tbin):
    """Vary slightly the bin time to have a power of two number of bins.
    Given an FFT length and a proposed bin time, return a bin time
    slightly shorter than the original, that will produce a power-of-two number
    of FFT bins.
    """
    # Round the number of bins up to the next power of two, then derive
    # the bin time that yields exactly that many bins.
    nbin = 2 ** np.ceil(np.log2(fftlen / tbin))
    return fftlen / nbin
def contiguous_regions(condition):
    """Find contiguous True regions of the boolean array "condition".
    Return a 2D array where the first column is the start index of the region
    and the second column is the end index.
    Parameters
    ----------
    condition : boolean array
    Returns
    -------
    idx : [[i0_0, i0_1], [i1_0, i1_1], ...]
        A list of integer couples, with the start and end of each True blocks
        in the original array
    Notes
    -----
    From : http://stackoverflow.com/questions/4494404/find-large-number-of-consecutive-values-
    fulfilling-condition-in-a-numpy-array
    """
    # NOQA
    # Indices where "condition" flips value, shifted right by one so each
    # index points at the first element *after* the change.
    change_points = np.diff(condition).nonzero()[0] + 1
    if condition[0]:
        # Array starts inside a True block: prepend its start index (0)
        change_points = np.r_[0, change_points]
    if condition[-1]:
        # Array ends inside a True block: append the end-of-array index
        change_points = np.r_[change_points, condition.size]
    # Pair up (start, end) indices, one row per True block
    change_points.shape = (-1, 2)
    return change_points
| [
"numpy.log2",
"numpy.asarray",
"numpy.float",
"numpy.diff",
"numpy.arange"
] | [((1549, 1562), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1559, 1562), True, 'import numpy as np\n'), ((1804, 1839), 'numpy.arange', 'np.arange', (['(0)', 'y.shape[0]', 'step_size'], {}), '(0, y.shape[0], step_size)\n', (1813, 1839), True, 'import numpy as np\n'), ((2303, 2321), 'numpy.asarray', 'np.asarray', (['output'], {}), '(output)\n', (2313, 2321), True, 'import numpy as np\n'), ((4752, 4770), 'numpy.diff', 'np.diff', (['condition'], {}), '(condition)\n', (4759, 4770), True, 'import numpy as np\n'), ((2411, 2430), 'numpy.float', 'np.float', (['step_size'], {}), '(step_size)\n', (2419, 2430), True, 'import numpy as np\n'), ((2786, 2810), 'numpy.arange', 'np.arange', (['ybin.shape[0]'], {}), '(ybin.shape[0])\n', (2795, 2810), True, 'import numpy as np\n'), ((4020, 4042), 'numpy.log2', 'np.log2', (['(fftlen / tbin)'], {}), '(fftlen / tbin)\n', (4027, 4042), True, 'import numpy as np\n')] |
"""Utility functions for posterior inference"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
tfd = tfp.distributions
def make_value_setter(**model_kwargs):
    """Creates a value-setting interceptor for VI under Edward2.

    Each keyword maps a random-variable name to the value it should be
    pinned to when the model is re-executed under this interceptor.
    """
    def set_values(f, *args, **kwargs):
        """Substitute an RV's value when its name appears in model_kwargs."""
        rv_name = kwargs.get("name")
        if rv_name in model_kwargs:
            kwargs["value"] = model_kwargs[rv_name]
        return ed.interceptable(f)(*args, **kwargs)
    return set_values
def make_sparse_gp_parameters(m, S,
                              X, Z, ls, kern_func,
                              ridge_factor=1e-3,
                              mean_name='qf_mean', compute_mean=True):
    """Produces variational parameters for sparse GP approximation.
    Args:
        m: (tf.Tensor or None) Variational parameter for mean of latent GP, shape (Nz, )
            Can be None if compute_mean=False
        S: (tf.Tensor) Variational parameter for covariance of latent GP, shape (Nz, Nz)
        X: (np.ndarray of float32) input training features, with dimension (Nx, D).
        Z: (np.ndarray of float32) inducing points, with dimension (Nz, D).
        ls: (float32) length scale parameter.
        kern_func: (function) kernel function.
        ridge_factor: (float32) small ridge factor to stabilize Cholesky decomposition
        mean_name: (str) name for the mean parameter
        compute_mean: (bool) If False, mean variational parameter is not computed.
            In this case, its ok to have m=None
    Returns:
        Mu (tf.Tensor or none) Mean parameters for sparse Gaussian Process, shape (Nx, ).
            if compute_mean=False, then Mu is None.
        Sigma (tf.Tensor) Covariance parameters for sparse Gaussian Process, shape (Nx, Nx).
    """
    # NOTE(review): Nz is computed but unused below — kept for readability.
    Nx, Nz = X.shape[0], Z.shape[0]
    # compute matrix constants: kernel blocks between training points (X)
    # and inducing points (Z)
    Kxx = kern_func(X, ls=ls)
    Kxz = kern_func(X, Z, ls=ls)
    Kzz = kern_func(Z, ls=ls, ridge_factor=ridge_factor)
    # compute null covariance matrix using Cholesky decomposition
    # (inverting the Cholesky factor rather than Kzz directly for stability)
    Kzz_chol_inv = tf.matrix_inverse(tf.cholesky(Kzz))
    Kzz_inv = tf.matmul(Kzz_chol_inv, Kzz_chol_inv, transpose_a=True)
    Kxz_Kzz_chol_inv = tf.matmul(Kxz, Kzz_chol_inv, transpose_b=True)
    Kxz_Kzz_inv = tf.matmul(Kxz, Kzz_inv)
    Sigma_pre = Kxx - tf.matmul(Kxz_Kzz_chol_inv, Kxz_Kzz_chol_inv, transpose_b=True)
    # compute sparse gp variational parameter (i.e. mean and covariance of P(f_obs | f_latent))
    # Sigma = Kxx - Kxz Kzz^-1 Kzx + Kxz Kzz^-1 S Kzz^-1 Kzx + ridge*I
    Sigma = (Sigma_pre +
             tf.matmul(Kxz_Kzz_inv,
                       tf.matmul(S, Kxz_Kzz_inv, transpose_b=True)) +
             ridge_factor * tf.eye(Nx))
    if compute_mean:
        # Mu = Kxz Kzz^-1 m, the projected variational mean
        Mu = tf.tensordot(Kxz_Kzz_inv, m, [[1], [0]], name=mean_name)
    else:
        Mu = None
    return Mu, Sigma
def make_cond_gp_parameters(K_00, K_11, K_22,
                            K_01, K_20, K_21,
                            ridge_factor_K=1e-3,
                            ridge_factor_Sigma=1e-3):
    """Computes the conditional posterior for f_new|f_obs, f_deriv.

    For stability, numpy is used instead of tensorflow: the kernel blocks
    are evaluated once in a throwaway session, then all linear algebra is
    done in float64 with pseudo-inverses.

    Args:
        K_00, K_11, K_22: (tf.Tensor) diagonal kernel blocks for the new
            points (0), observed points (1) and derivative points (2).
        K_01, K_20, K_21: (tf.Tensor) cross kernel blocks between those sets.
        ridge_factor_K: (float) ridge added to the conditioned K_22 block.
        ridge_factor_Sigma: (float) ridge intended for the output covariance
            (currently only used by the commented-out Sigma expression).

    Returns:
        P_01, P_02 (np.ndarray of float32): mean projection matrices for the
            observed and derivative components, respectively.
        Sigma (np.ndarray of float64): covariance of the conditional.
    """
    # convert to np array
    with tf.Session() as sess:
        K_00, K_11, K_22, K_01, K_20, K_21 = sess.run([
            K_00, K_11, K_22, K_01, K_20, K_21
        ])
    # work in double precision for the pseudo-inverse computations
    K_00 = K_00.astype(np.float64)
    K_11 = K_11.astype(np.float64)
    K_22 = K_22.astype(np.float64)
    K_01 = K_01.astype(np.float64)
    K_20 = K_20.astype(np.float64)
    K_21 = K_21.astype(np.float64)
    # compute matrix components
    K_11_inv_12 = np.matmul(np.linalg.pinv(K_11), K_21.T)
    K_22_inv_21 = np.matmul(np.linalg.pinv(K_22), K_21)
    # assemble projection matrix: each block conditioned on the other set
    K_02_1 = K_20.T - np.matmul(K_01, K_11_inv_12)
    K_22_1 = (K_22 - np.matmul(K_21, K_11_inv_12) +
              ridge_factor_K * np.eye(K_22.shape[0]))
    K_01_2 = K_01 - np.matmul(K_20.T, K_22_inv_21)
    K_11_2 = K_11 - np.matmul(K_21.T, K_22_inv_21)
    # compute mean projection matrix
    P_01 = np.matmul(K_01_2, np.linalg.pinv(K_11_2))
    P_02 = np.matmul(K_02_1, np.linalg.pinv(K_22_1))
    # compute Cholesky decomposition for covariance matrix.
    # NOTE(review): Sigma below conditions on K_11 only; the commented-out
    # terms suggest the full two-block conditional covariance (and its
    # Cholesky factor) was deliberately disabled — confirm intent.
    Sigma = K_00 - K_01.dot(np.linalg.pinv(K_11).dot(K_01.T))
    # np.matmul(P_01, K_01.T)
    # - np.matmul(P_02, K_20) +
    # ridge_factor_Sigma * np.eye(K_00.shape[0]))
    # Sigma_chol = np.linalg.cholesky(Sigma).astype(np.float32)
    return P_01.astype(np.float32), P_02.astype(np.float32), Sigma
def make_mfvi_mixture_family(n_mixture, N, name):
    """Makes mixture of MFVI variational prior.
    Args:
        n_mixture: (int) Number of MFVI mixture components.
        N: (int) Number of sample observations.
        name: (str) Name prefix of parameters.
    Returns:
        mfvi_mix_dist: (tfd.Distribution) Mixture distribution; a plain
            MultivariateNormalDiag when n_mixture=1, a MixtureSameFamily
            otherwise.
        mixture_logits_mfvi_mix: (tf.Variable) Mixture probability (logit)
            for MFVI families, shape (n_mixture,). Always created, even
            when n_mixture=1 (where it is unused by the distribution).
        qf_mean_mfvi_mix, qf_sdev_mfvi_mix: (tf.Variable / tf.Tensor) Mean
            and sdev for MFVI families. Shape (n_mixture, N) if
            n_mixture > 1, and shape (N,) if n_mixture = 1.
    """
    # define mixture probability
    mixture_logits_mfvi_mix = tf.get_variable(shape=[n_mixture],
                                              name='{}_mixture_logits_mfvi_mix'.format(name))
    # define variational parameter
    param_shape = [n_mixture, N] if n_mixture > 1 else [N]
    qf_mean_mfvi_mix = tf.get_variable(shape=param_shape,
                                       name='{}_mean_mfvi_mix'.format(name))
    # exponentiate so the stddev is always positive
    qf_sdev_mfvi_mix = tf.exp(tf.get_variable(shape=param_shape,
                                           name='{}_sdev_mfvi_mix'.format(name)))
    if n_mixture == 1:
        # single component: no mixture wrapper needed
        mfvi_mix_dist = tfd.MultivariateNormalDiag(loc=qf_mean_mfvi_mix,
                                                  scale_diag=qf_sdev_mfvi_mix)
    else:
        mfvi_mix_dist = tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(logits=mixture_logits_mfvi_mix),
            components_distribution=tfd.MultivariateNormalDiag(
                loc=qf_mean_mfvi_mix,
                scale_diag=qf_sdev_mfvi_mix),
        )
    return mfvi_mix_dist, mixture_logits_mfvi_mix, qf_mean_mfvi_mix, qf_sdev_mfvi_mix
def sample_mfvi_mixture_family(N_sample, mixture_logits,
                               mean_mfvi_mix, sdev_mfvi_mix):
    """Samples from mixture of MFVI family.
    Args:
        N_sample: (int) Number of samples.
        mixture_logits: (np.ndarray) Mixture logits, shape (n_mixture,).
        mean_mfvi_mix: (np.ndarray) Means for MFVI components,
            shape (n_mixture, N), dtype float32.
        sdev_mfvi_mix: (np.ndarray) Stddev for MFVI components,
            shape (n_mixture, N), dtype float32.
    Returns:
        mfvi_mix_sample: (tf.Tensor) Samples from mixture family,
            shape (N_sample, N), dtype float32.
    """
    # define mixture distribution, mirroring make_mfvi_mixture_family:
    # a plain diagonal Gaussian when there is a single component
    if mixture_logits.shape[0] > 1:
        mfvi_mix_dist = tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(logits=mixture_logits),
            components_distribution=tfd.MultivariateNormalDiag(
                loc=mean_mfvi_mix, scale_diag=sdev_mfvi_mix),
        )
    else:
        mfvi_mix_dist = tfd.MultivariateNormalDiag(loc=mean_mfvi_mix,
                                              scale_diag=sdev_mfvi_mix)
    return mfvi_mix_dist.sample(N_sample)
def make_mfvi_sgp_mixture_family(n_mixture, N, gp_dist, name,
                                 use_logistic_link=False):
    """Makes mixture of MFVI and Sparse GP variational prior.
    Args:
        n_mixture: (int) Number of MFVI mixture components.
        N: (int) Number of sample observations.
        gp_dist: (tfd.Distribution) variational family for gaussian process.
        name: (str) Name prefix of parameters.
        use_logistic_link: (bool) If True, pass the mixture through a
            Sigmoid bijector so samples lie in (0, 1).
    Returns:
        mfvi_sgp_mix_dist: (ed.RandomVariable) Two-component mixture of the
            MFVI family and the GP family (optionally sigmoid-transformed).
        mixture_par_list: (list) Trainable parameters: the top-level mixture
            logits plus the MFVI family's logits, mean and sdev.
    """
    # define mixture probability between the MFVI family and the GP family
    mixture_logits = tf.get_variable(name="{}_mixture_logits".format(name), shape=[2])
    (mfvi_mix_dist, mixture_logits_mfvi_mix,
     qf_mean_mfvi_mix, qf_sdev_mfvi_mix
     ) = make_mfvi_mixture_family(n_mixture=n_mixture, N=N, name=name)
    mixture_par_list = [mixture_logits, mixture_logits_mfvi_mix,
                        qf_mean_mfvi_mix, qf_sdev_mfvi_mix]
    if use_logistic_link:
        # squash samples through a sigmoid link
        mfvi_sgp_mix_dist = ed.TransformedDistribution(
            tfd.Mixture(
                cat=tfd.Categorical(logits=mixture_logits),
                components=[mfvi_mix_dist, gp_dist]),
            bijector=tfp.bijectors.Sigmoid(),
            name=name)
    else:
        mfvi_sgp_mix_dist = ed.Mixture(
            cat=tfd.Categorical(logits=mixture_logits),
            components=[mfvi_mix_dist, gp_dist],
            name=name)
    return mfvi_sgp_mix_dist, mixture_par_list
def scalar_gaussian_variational(name, mean=None, sdev=None):
    """
    Creates a scalar Gaussian random variable for variational approximation.
    Args:
        name: (str) name of the output random variable.
        mean: (float or None) fixed mean; if None a trainable tf.Variable
            is created instead.
        sdev: (float or None) fixed standard deviation; if None a trainable
            tf.Variable is created (exponentiated to stay positive).
    Returns:
        scalar_gaussian_rv: (ed.RandomVariable of float32) A normal scalar
            random variable.
        mean, sdev: (tf.Tensor) the (possibly trainable) parameters used.
    """
    if mean is None:
        # trainable mean parameter
        mean = tf.get_variable(shape=[], name='{}_mean'.format(name))
    else:
        mean = tf.convert_to_tensor(mean, dtype=tf.float32)
    if sdev is None:
        # trainable sdev parameter; exp keeps it strictly positive
        sdev = tf.exp(tf.get_variable(shape=[], name='{}_sdev'.format(name)))
    else:
        sdev = tf.convert_to_tensor(sdev, dtype=tf.float32)
    scalar_gaussian_rv = ed.Normal(loc=mean, scale=sdev, name=name)
    return scalar_gaussian_rv, mean, sdev
def sample_scalar_gaussian_variational(n_sample, mean, sdev):
    """Generates samples from a scalar Gaussian variational random variable.

    Args:
        n_sample: (int) number of samples to draw
        mean: (tf.Tensor of float32) mean parameter for the variational family
        sdev: (tf.Tensor of float32) standard deviation for the variational
            family

    Returns:
        (tf.Tensor) sampled values, shape (n_sample,).
    """
    # FIX: removed a stray second string-literal statement that followed the
    # docstring (dead code) and corrected the documented parameter names
    # (the docstring referred to qf_mean/qf_sdev, which do not exist here).
    scalar_gaussian_rv = tfd.Normal(loc=mean, scale=sdev)
    return scalar_gaussian_rv.sample(n_sample)
| [
"tensorflow_probability.edward2.interceptable",
"tensorflow.convert_to_tensor",
"tensorflow.eye",
"tensorflow.Session",
"tensorflow.matmul",
"tensorflow_probability.edward2.Normal",
"tensorflow_probability.bijectors.Sigmoid",
"tensorflow.tensordot",
"numpy.matmul",
"numpy.eye",
"numpy.linalg.pin... | [((2225, 2280), 'tensorflow.matmul', 'tf.matmul', (['Kzz_chol_inv', 'Kzz_chol_inv'], {'transpose_a': '(True)'}), '(Kzz_chol_inv, Kzz_chol_inv, transpose_a=True)\n', (2234, 2280), True, 'import tensorflow as tf\n'), ((2305, 2351), 'tensorflow.matmul', 'tf.matmul', (['Kxz', 'Kzz_chol_inv'], {'transpose_b': '(True)'}), '(Kxz, Kzz_chol_inv, transpose_b=True)\n', (2314, 2351), True, 'import tensorflow as tf\n'), ((2370, 2393), 'tensorflow.matmul', 'tf.matmul', (['Kxz', 'Kzz_inv'], {}), '(Kxz, Kzz_inv)\n', (2379, 2393), True, 'import tensorflow as tf\n'), ((9953, 9995), 'tensorflow_probability.edward2.Normal', 'ed.Normal', ([], {'loc': 'mean', 'scale': 'sdev', 'name': 'name'}), '(loc=mean, scale=sdev, name=name)\n', (9962, 9995), True, 'from tensorflow_probability import edward2 as ed\n'), ((2193, 2209), 'tensorflow.cholesky', 'tf.cholesky', (['Kzz'], {}), '(Kzz)\n', (2204, 2209), True, 'import tensorflow as tf\n'), ((2416, 2479), 'tensorflow.matmul', 'tf.matmul', (['Kxz_Kzz_chol_inv', 'Kxz_Kzz_chol_inv'], {'transpose_b': '(True)'}), '(Kxz_Kzz_chol_inv, Kxz_Kzz_chol_inv, transpose_b=True)\n', (2425, 2479), True, 'import tensorflow as tf\n'), ((2783, 2839), 'tensorflow.tensordot', 'tf.tensordot', (['Kxz_Kzz_inv', 'm', '[[1], [0]]'], {'name': 'mean_name'}), '(Kxz_Kzz_inv, m, [[1], [0]], name=mean_name)\n', (2795, 2839), True, 'import tensorflow as tf\n'), ((3256, 3268), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3266, 3268), True, 'import tensorflow as tf\n'), ((3687, 3707), 'numpy.linalg.pinv', 'np.linalg.pinv', (['K_11'], {}), '(K_11)\n', (3701, 3707), True, 'import numpy as np\n'), ((3745, 3765), 'numpy.linalg.pinv', 'np.linalg.pinv', (['K_22'], {}), '(K_22)\n', (3759, 3765), True, 'import numpy as np\n'), ((3829, 3857), 'numpy.matmul', 'np.matmul', (['K_01', 'K_11_inv_12'], {}), '(K_01, K_11_inv_12)\n', (3838, 3857), True, 'import numpy as np\n'), ((3984, 4014), 'numpy.matmul', 'np.matmul', (['K_20.T', 'K_22_inv_21'], {}), '(K_20.T, 
K_22_inv_21)\n', (3993, 4014), True, 'import numpy as np\n'), ((4035, 4065), 'numpy.matmul', 'np.matmul', (['K_21.T', 'K_22_inv_21'], {}), '(K_21.T, K_22_inv_21)\n', (4044, 4065), True, 'import numpy as np\n'), ((4133, 4155), 'numpy.linalg.pinv', 'np.linalg.pinv', (['K_11_2'], {}), '(K_11_2)\n', (4147, 4155), True, 'import numpy as np\n'), ((4186, 4208), 'numpy.linalg.pinv', 'np.linalg.pinv', (['K_22_1'], {}), '(K_22_1)\n', (4200, 4208), True, 'import numpy as np\n'), ((9712, 9756), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mean'], {'dtype': 'tf.float32'}), '(mean, dtype=tf.float32)\n', (9732, 9756), True, 'import tensorflow as tf\n'), ((9882, 9926), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['sdev'], {'dtype': 'tf.float32'}), '(sdev, dtype=tf.float32)\n', (9902, 9926), True, 'import tensorflow as tf\n'), ((548, 567), 'tensorflow_probability.edward2.interceptable', 'ed.interceptable', (['f'], {}), '(f)\n', (564, 567), True, 'from tensorflow_probability import edward2 as ed\n'), ((2736, 2746), 'tensorflow.eye', 'tf.eye', (['Nx'], {}), '(Nx)\n', (2742, 2746), True, 'import tensorflow as tf\n'), ((3879, 3907), 'numpy.matmul', 'np.matmul', (['K_21', 'K_11_inv_12'], {}), '(K_21, K_11_inv_12)\n', (3888, 3907), True, 'import numpy as np\n'), ((3941, 3962), 'numpy.eye', 'np.eye', (['K_22.shape[0]'], {}), '(K_22.shape[0])\n', (3947, 3962), True, 'import numpy as np\n'), ((2661, 2704), 'tensorflow.matmul', 'tf.matmul', (['S', 'Kxz_Kzz_inv'], {'transpose_b': '(True)'}), '(S, Kxz_Kzz_inv, transpose_b=True)\n', (2670, 2704), True, 'import tensorflow as tf\n'), ((9013, 9036), 'tensorflow_probability.bijectors.Sigmoid', 'tfp.bijectors.Sigmoid', ([], {}), '()\n', (9034, 9036), True, 'import tensorflow_probability as tfp\n'), ((4299, 4319), 'numpy.linalg.pinv', 'np.linalg.pinv', (['K_11'], {}), '(K_11)\n', (4313, 4319), True, 'import numpy as np\n')] |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""NIfTI metadata extractor"""
from os.path import join as opj
import logging
lgr = logging.getLogger('datalad.metadata.extractors.nifti1')
from datalad.log import log_progress
from math import isnan
import nibabel
import numpy as np
from datalad.metadata.definitions import vocabulary_id
from datalad.metadata.extractors.base import BaseMetadataExtractor
from datalad.dochelpers import exc_str
# Ad-hoc term definitions used to annotate the extracted metadata.
# Each entry maps a metadata key to its identifier (and, where applicable,
# its unit) in the referenced ontologies.
vocabulary = {
    'nifti1': {
        '@id': 'https://nifti.nimh.nih.gov/nifti-1/documentation/nifti1fields#',
        'description': 'Ad-hoc vocabulary for NIfTI1 header fields',
        'type': vocabulary_id},
    "spatial_resolution(mm)": {
        '@id': "idqa:0000162",
        'unit': "uo:0000016",
        'unit_label': 'millimeter',
        'description': "spatial resolution in millimeter"},
    "temporal_spacing(s)": {
        '@id': "idqa:0000213",
        'unit': "uo:0000010",
        'unit_label': 'second',
        'description': "temporal sample distance in 4D (in seconds)"},
}
# Maps NIfTI header unit strings to (label, unit-ontology ID) pairs.
# NOTE(review): 'millimiter' is misspelled in the data below; it is a
# runtime value consumed elsewhere, so it is flagged here rather than
# silently changed.
unit_map = {
    'meter': ('meter', 'uo:0000008'),
    'millimeter': ('millimiter', 'uo:0000016'),
    'mm': ('millimiter', 'uo:0000016'),
    'micron': ('micrometer', 'uo:0000017'),
    'second': ('second', 'uo:0000010'),
    'sec': ('second', 'uo:0000010'),
    'usec': ('microsecond', 'uo:0000029'),
    'hertz': ('hertz', 'uo:0000106'),
    'hz': ('hertz', 'uo:0000106'),
    'ppm': ('parts per million', 'uo:0000109'),
    'rad': ('radian', 'uo:0000123'),
    'rads': ('radian', 'uo:0000123'),
}
# to serve as a default for when expect 0 to be consumable by np.asscalar
_array0 = np.array(0)
class MetadataExtractor(BaseMetadataExtractor):
    """Extract metadata from NIfTI1 files using NiBabel.

    Fixes vs. the previous revision:
    - ``np.asscalar`` (deprecated in NumPy 1.16, removed in 1.23) is
      replaced with the equivalent ``ndarray.item()``.
    - an unrecognized spatial unit no longer raises ``TypeError`` from
      multiplying by ``None``; the field is skipped after logging.
    """

    # header fields excluded from uniqueness-based aggregation
    _unique_exclude = {
        'cal_min',
        'cal_max',
    }

    # map raw NIfTI header keys to standardized vocabulary keys
    _key2stdkey = {
        'descrip': 'description',
    }

    # targeted extractors; each receives a nibabel NIfTI1 header instance
    _extractors = {
        'datatype': lambda x: x.get_data_dtype().name,
        'intent': lambda x: x.get_intent(code_repr='label')[0],
        'freq_axis': lambda x: x.get_dim_info()[0],
        'phase_axis': lambda x: x.get_dim_info()[1],
        'slice_axis': lambda x: x.get_dim_info()[2],
        'xyz_unit': lambda x: '{} ({})'.format(
            *unit_map[x.get_xyzt_units()[0]]) if x.get_xyzt_units()[0] in unit_map else '',
        't_unit': lambda x: '{} ({})'.format(
            *unit_map[x.get_xyzt_units()[1]]) if x.get_xyzt_units()[1] in unit_map else '',
        # .item() converts the NumPy scalar to a plain Python int
        # (np.asscalar() was removed in NumPy 1.23)
        'qform_code': lambda x: nibabel.nifti1.xform_codes.label[
            x.get('qform_code', _array0).item()],
        'sform_code': lambda x: nibabel.nifti1.xform_codes.label[
            x.get('sform_code', _array0).item()],
        'slice_order': lambda x: nibabel.nifti1.slice_order_codes.label[
            x.get('slice_code', _array0).item()],
    }

    # raw header fields not reported as-is: either decoded via
    # _extractors above, or unused leftovers from the ANALYZE format
    _ignore = {
        'datatype',
        'intent_p1',
        'intent_p2',
        'intent_p3',
        'intent_code',
        'dim_info',
        'xyzt_units',
        'qform_code',
        'sform_code',
        'quatern_b',
        'quatern_c',
        'quatern_d',
        'qoffset_x',
        'qoffset_y',
        'qoffset_z',
        'srow_x',
        'srow_y',
        'srow_z',
        'slice_code',
        'bitpix',
        # unused fields in the ANALYZE header
        'data_type',
        'db_name',
        'extents',
        'session_error',
        'regular',
        'glmax',
        'glmin',
    }

    def get_metadata(self, dataset, content):
        """Return (dataset_metadata, content_metadata) for ``self.paths``.

        Parameters
        ----------
        dataset : bool
          Whether dataset-level metadata was requested (only the
          ``@context`` is provided either way).
        content : bool
          Whether file-level metadata shall be extracted; if False,
          nothing is done.

        Returns
        -------
        tuple(dict, list)
          Dataset metadata dict and a list of (path, metadata-dict)
          tuples, one per successfully parsed NIfTI1 file.
        """
        if not content:
            return {}, []
        contentmeta = []
        log_progress(
            lgr.info,
            'extractornifti1',
            'Start NIfTI1 metadata extraction from %s', self.ds,
            total=len(self.paths),
            label='NIfTI1 metadata extraction',
            unit=' Files',
        )
        for f in self.paths:
            absfp = opj(self.ds.path, f)
            log_progress(
                lgr.info,
                'extractornifti1',
                'Extract NIfTI1 metadata from %s', absfp,
                update=1,
                increment=True)
            try:
                header = nibabel.load(absfp).header
            except Exception as e:
                # not a (readable) NIfTI file -- skip, do not fail the run
                lgr.debug("NIfTI metadata extractor failed to load %s: %s",
                          absfp, exc_str(e))
                continue
            if not isinstance(header, nibabel.Nifti1Header):
                # all we can do for now
                lgr.debug("Ignoring non-NIfTI1 file %s", absfp)
                continue
            # blunt conversion of the entire header; arrays become lists
            # of Python scalars, 0-d arrays become plain scalars
            meta = {self._key2stdkey.get(k, k):
                    [i.item() for i in v]
                    if len(v.shape)
                    # scalar
                    else v.item()
                    for k, v in header.items()
                    if k not in self._ignore}
            # more convenient info from nibabel's support functions
            meta.update(
                {k: v(header) for k, v in self._extractors.items()})
            # filter useless fields (empty strings and NaNs)
            meta = {k: v for k, v in meta.items()
                    if not (isinstance(v, float) and isnan(v)) and
                    not (hasattr(v, '__len__') and not len(v))}
            # a few more convenient targeted extracts from the header
            # spatial resolution in millimeter
            spatial_unit = header.get_xyzt_units()[0]
            # by what factor to multiply by to get to 'mm'
            if spatial_unit == 'unknown':
                lgr.debug(
                    "unit of spatial resolution for '{}' unknown, assuming 'millimeter'".format(
                        absfp))
            spatial_unit_conversion = {
                'unknown': 1,
                'meter': 1000,
                'mm': 1,
                'micron': 0.001}.get(spatial_unit, None)
            if spatial_unit_conversion is None:
                # previously this fell through and crashed with a
                # TypeError on the multiplication below; now we skip
                lgr.debug("unexpected spatial unit code '{}' from NiBabel".format(
                    spatial_unit))
            else:
                meta['spatial_resolution(mm)'] = \
                    [(i * spatial_unit_conversion) for i in header.get_zooms()[:3]]
            # time
            if len(header.get_zooms()) > 3:
                # got a 4th dimension
                rts_unit = header.get_xyzt_units()[1]
                if rts_unit == 'unknown':
                    lgr.warn(
                        "RTS unit '{}' unknown, assuming 'seconds'".format(
                            absfp))
                # normalize to seconds, if possible
                rts_unit_conversion = {
                    'msec': 0.001,
                    'micron': 0.000001}.get(rts_unit, 1.0)
                if rts_unit not in ('hz', 'ppm', 'rads'):
                    meta['temporal_spacing(s)'] = \
                        header.get_zooms()[3] * rts_unit_conversion
            contentmeta.append((f, meta))
            # Decode entries which might be bytes; meta is appended by
            # reference above, so mutating it here still takes effect
            # TODO: consider doing that in above "metalad" logic
            for k, v in meta.items():
                if isinstance(v, bytes):
                    meta[k] = v.decode()
        log_progress(
            lgr.info,
            'extractornifti1',
            'Finished NIfTI1 metadata extraction from %s', self.ds
        )
        return {
            '@context': vocabulary,
        }, \
            contentmeta
| [
"math.isnan",
"nibabel.load",
"datalad.log.log_progress",
"numpy.array",
"datalad.dochelpers.exc_str",
"numpy.asscalar",
"os.path.join",
"logging.getLogger"
] | [((474, 529), 'logging.getLogger', 'logging.getLogger', (['"""datalad.metadata.extractors.nifti1"""'], {}), "('datalad.metadata.extractors.nifti1')\n", (491, 529), False, 'import logging\n'), ((1972, 1983), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (1980, 1983), True, 'import numpy as np\n'), ((7525, 7626), 'datalad.log.log_progress', 'log_progress', (['lgr.info', '"""extractornifti1"""', '"""Finished NIfTI1 metadata extraction from %s"""', 'self.ds'], {}), "(lgr.info, 'extractornifti1',\n 'Finished NIfTI1 metadata extraction from %s', self.ds)\n", (7537, 7626), False, 'from datalad.log import log_progress\n'), ((4166, 4186), 'os.path.join', 'opj', (['self.ds.path', 'f'], {}), '(self.ds.path, f)\n', (4169, 4186), True, 'from os.path import join as opj\n'), ((4199, 4312), 'datalad.log.log_progress', 'log_progress', (['lgr.info', '"""extractornifti1"""', '"""Extract NIfTI1 metadata from %s"""', 'absfp'], {'update': '(1)', 'increment': '(True)'}), "(lgr.info, 'extractornifti1', 'Extract NIfTI1 metadata from %s',\n absfp, update=1, increment=True)\n", (4211, 4312), False, 'from datalad.log import log_progress\n'), ((4432, 4451), 'nibabel.load', 'nibabel.load', (['absfp'], {}), '(absfp)\n', (4444, 4451), False, 'import nibabel\n'), ((5069, 5083), 'numpy.asscalar', 'np.asscalar', (['v'], {}), '(v)\n', (5080, 5083), True, 'import numpy as np\n'), ((4603, 4613), 'datalad.dochelpers.exc_str', 'exc_str', (['e'], {}), '(e)\n', (4610, 4613), False, 'from datalad.dochelpers import exc_str\n'), ((4952, 4966), 'numpy.asscalar', 'np.asscalar', (['i'], {}), '(i)\n', (4963, 4966), True, 'import numpy as np\n'), ((5503, 5511), 'math.isnan', 'isnan', (['v'], {}), '(v)\n', (5508, 5511), False, 'from math import isnan\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.