code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""Plot quantile or local effective sample sizes."""
import numpy as np
import xarray as xr
from ..data import convert_to_dataset
from ..labels import BaseLabeller
from ..rcparams import rcParams
from ..sel_utils import xarray_var_iter
from ..stats import ess
from ..utils import _var_names, get_coords
from .plot_utils import default_grid, filter_plotters_list, get_plotting_function
def plot_ess(
    idata,
    var_names=None,
    filter_vars=None,
    kind="local",
    relative=False,
    coords=None,
    figsize=None,
    grid=None,
    textsize=None,
    rug=False,
    rug_kind="diverging",
    n_points=20,
    extra_methods=False,
    min_ess=400,
    labeller=None,
    ax=None,
    extra_kwargs=None,
    text_kwargs=None,
    hline_kwargs=None,
    rug_kwargs=None,
    backend=None,
    backend_kwargs=None,
    show=None,
    **kwargs,
):
    """Plot quantile, local or evolution of effective sample sizes (ESS).

    Parameters
    ----------
    idata : obj
        Any object that can be converted to an :class:`arviz.InferenceData` object.
        Refer to documentation of :func:`arviz.convert_to_dataset` for details.
    var_names : list of str, optional
        Variables to be plotted. Prefix a name with ``~`` to exclude it.
    filter_vars : {None, "like", "regex"}, optional
        How ``var_names`` is interpreted: exact names (default), substrings
        of the real names, or regular expressions, a la ``pandas.filter``.
    kind : str, optional
        One of ``local``, ``quantile`` or ``evolution``; selects the kind of plot.
    relative : bool
        Show relative ess in plot ``ress = ess / N``.
    coords : dict, optional
        Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel`.
    figsize : tuple, optional
        Figure size. If None it will be defined automatically.
    grid : tuple
        Number of rows and columns. If None they are inferred automatically.
    textsize : float, optional
        Text size scaling factor for labels, titles and lines. Autoscaled from
        figsize when None.
    rug : bool
        Plot rug plot of values diverging or that reached the max tree depth.
    rug_kind : bool
        Variable in sample stats to use as rug mask. Must be a boolean variable.
    n_points : int
        Number of points for which to plot their quantile/local ess or number
        of subsets in the evolution plot.
    extra_methods : bool, optional
        Plot mean and sd ESS as horizontal lines. Ignored for the evolution kind.
    min_ess : int
        Minimum number of ESS desired. If ``relative=True`` the line is plotted at
        ``min_ess / n_samples`` for local and quantile kinds and as a curve
        following the ``min_ess / n`` dependency in evolution kind.
    labeller : labeller instance, optional
        Class providing ``make_label_vert`` to generate the plot titles.
        Read the :ref:`label_guide` for more details and usage examples.
    ax : numpy array-like of matplotlib axes or bokeh figures, optional
        A 2D array of locations into which to plot the densities. Created
        automatically (and returned) when not supplied.
    extra_kwargs : dict, optional
        If evolution plot, used to plot ess tail and differentiate it from ess
        bulk. Otherwise, passed to extra methods lines.
    text_kwargs : dict, optional
        Only taken into account when ``extra_methods=True``. kwargs passed to
        ax.annotate for extra methods lines labels. Accepts the additional key
        ``x`` to set ``xy=(text_kwargs["x"], mcse)``.
    hline_kwargs : dict, optional
        kwargs for the horizontal minimum ESS line, passed to
        :func:`~matplotlib.axes.Axes.axhline` or :class:`~bokeh.models.Span`
        depending on the backend; for relative ess evolution plots they go to
        :func:`~matplotlib.axes.Axes.plot` or :class:`~bokeh.plotting.figure.line`.
    rug_kwargs : dict
        kwargs passed to rug plot.
    backend : str, optional
        Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
    backend_kwargs : bool, optional
        Backend-specific kwargs passed to :func:`matplotlib.pyplot.subplots`
        or :func:`bokeh.plotting.figure`.
    show : bool, optional
        Call backend show function.
    **kwargs
        Passed as-is to :meth:`mpl:matplotlib.axes.Axes.hist` or
        :meth:`mpl:matplotlib.axes.Axes.plot` depending on ``kind``.

    Returns
    -------
    axes : matplotlib axes or bokeh figures

    See Also
    --------
    ess : Calculate estimate of the effective sample size.

    References
    ----------
    * Vehtari et al. (2019) see https://arxiv.org/abs/1903.08008
    """
    valid_kinds = ("local", "quantile", "evolution")
    kind = kind.lower()
    if kind not in valid_kinds:
        raise ValueError(f"Invalid kind, kind must be one of {valid_kinds} not {kind}")

    coords = {} if coords is None else coords
    if "chain" in coords or "draw" in coords:
        raise ValueError("chain and draw are invalid coordinates for this kind of plot")
    labeller = BaseLabeller() if labeller is None else labeller
    if kind == "evolution":
        # Mean/sd reference lines are only meaningful for local/quantile kinds.
        extra_methods = False

    data = get_coords(convert_to_dataset(idata, group="posterior"), coords)
    var_names = _var_names(var_names, data, filter_vars)
    n_draws = data.dims["draw"]
    n_samples = n_draws * data.dims["chain"]

    ess_tail_dataset = None
    mean_ess = None
    sd_ess = None

    if kind == "quantile":
        xdata = np.linspace(1 / n_points, 1 - 1 / n_points, n_points)
        ylabel = "{} for quantiles"
        ess_dataset = xr.concat(
            [
                ess(data, var_names=var_names, relative=relative, method="quantile", prob=prob)
                for prob in xdata
            ],
            dim="ess_dim",
        )
    elif kind == "local":
        xdata = np.linspace(0, 1, n_points, endpoint=False)
        ylabel = "{} for small intervals"
        ess_dataset = xr.concat(
            [
                ess(
                    data,
                    var_names=var_names,
                    relative=relative,
                    method="local",
                    prob=[prob, prob + 1 / n_points],
                )
                for prob in xdata
            ],
            dim="ess_dim",
        )
    else:  # evolution
        first_draw = data.draw.values[0]
        ylabel = "{}"
        xdata = np.linspace(n_samples / n_points, n_samples, n_points)
        draw_divisions = np.linspace(n_draws // n_points, n_draws, n_points, dtype=int)
        # Bulk and tail ESS are computed on growing prefixes of the draws.
        ess_dataset = xr.concat(
            [
                ess(
                    data.sel(draw=slice(first_draw + draw_div)),
                    var_names=var_names,
                    relative=relative,
                    method="bulk",
                )
                for draw_div in draw_divisions
            ],
            dim="ess_dim",
        )
        ess_tail_dataset = xr.concat(
            [
                ess(
                    data.sel(draw=slice(first_draw + draw_div)),
                    var_names=var_names,
                    relative=relative,
                    method="tail",
                )
                for draw_div in draw_divisions
            ],
            dim="ess_dim",
        )

    plotters = filter_plotters_list(
        list(xarray_var_iter(ess_dataset, var_names=var_names, skip_dims={"ess_dim"})), "plot_ess"
    )
    rows, cols = default_grid(len(plotters), grid=grid)

    if extra_methods:
        mean_ess = ess(data, var_names=var_names, method="mean", relative=relative)
        sd_ess = ess(data, var_names=var_names, method="sd", relative=relative)

    essplot_kwargs = {
        "ax": ax,
        "plotters": plotters,
        "xdata": xdata,
        "ess_tail_dataset": ess_tail_dataset,
        "mean_ess": mean_ess,
        "sd_ess": sd_ess,
        "idata": idata,
        "data": data,
        "kind": kind,
        "extra_methods": extra_methods,
        "textsize": textsize,
        "rows": rows,
        "cols": cols,
        "figsize": figsize,
        "kwargs": kwargs,
        "extra_kwargs": extra_kwargs,
        "text_kwargs": text_kwargs,
        "n_samples": n_samples,
        "relative": relative,
        "min_ess": min_ess,
        "labeller": labeller,
        "ylabel": ylabel,
        "rug": rug,
        "rug_kind": rug_kind,
        "rug_kwargs": rug_kwargs,
        "hline_kwargs": hline_kwargs,
        "backend_kwargs": backend_kwargs,
        "show": show,
    }

    if backend is None:
        backend = rcParams["plot.backend"]
    backend = backend.lower()

    # TODO: Add backend kwargs
    plot = get_plotting_function("plot_ess", "essplot", backend)
    return plot(**essplot_kwargs)
| [
"numpy.linspace"
] | [((7851, 7904), 'numpy.linspace', 'np.linspace', (['(1 / n_points)', '(1 - 1 / n_points)', 'n_points'], {}), '(1 / n_points, 1 - 1 / n_points, n_points)\n', (7862, 7904), True, 'import numpy as np\n'), ((8228, 8271), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_points'], {'endpoint': '(False)'}), '(0, 1, n_points, endpoint=False)\n', (8239, 8271), True, 'import numpy as np\n'), ((8784, 8838), 'numpy.linspace', 'np.linspace', (['(n_samples / n_points)', 'n_samples', 'n_points'], {}), '(n_samples / n_points, n_samples, n_points)\n', (8795, 8838), True, 'import numpy as np\n'), ((8864, 8926), 'numpy.linspace', 'np.linspace', (['(n_draws // n_points)', 'n_draws', 'n_points'], {'dtype': 'int'}), '(n_draws // n_points, n_draws, n_points, dtype=int)\n', (8875, 8926), True, 'import numpy as np\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Simple transfer learning with Inception v3 or Mobilenet models.
With support for TensorBoard.
This example shows how to take a Inception v3 or Mobilenet model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector (1001-dimensional for
Mobilenet) for each image. We train a softmax layer on top of this
representation. Assuming the softmax layer contains N labels, this corresponds
to learning N + 2048*N (or 1001*N) model parameters corresponding to the
learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
```bash
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
```
Or, if you have a pip installation of tensorflow, `retrain.py` can be run
without bazel:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
By default this script will use the high accuracy, but comparatively large and
slow Inception v3 model architecture. It's recommended that you start with this
to validate that you have gathered good training data, but if you want to deploy
on resource-limited platforms, you can try the `--architecture` flag with a
Mobilenet model. For example:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos --architecture mobilenet_1.0_224
```
There are 32 different Mobilenet models to choose from, with a variety of file
size and latency options. The first number can be '1.0', '0.75', '0.50', or
'0.25' to control the size, and the second controls the input image size, either
'224', '192', '160', or '128', with smaller sizes running faster. See
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
from django.conf import settings
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# Upper bound on images per label; also the modulus of the filename hash
# used to assign images to train/test/validation sets. ~134M
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
# Root folder for per-run training artifacts (graphs, labels, logs, bottlenecks).
TRAINING_DIR = os.path.join(settings.BASE_DIR, 'training')
def retrain_network(
training_run,
intermediate_store_frequency=0,
learning_rate=0.01,
validation_percentage=10,
eval_step_interval=10,
train_batch_size=100,
test_batch_size=-1,
validation_batch_size=100,
print_misclassified_test_images=False,
flip_left_right=False,
random_crop=0,
random_scale=0,
random_brightness=0,
):
name = training_run.name
image_dir = training_run.image_dir
how_many_training_steps = training_run.training_steps
testing_percentage = training_run.testing_percentage
architecture = training_run.model
output_graph = os.path.join(TRAINING_DIR, name, 'retrained_graph.pb')
intermediate_output_graphs_dir = os.path.join(TRAINING_DIR, name, 'intermediate_graph')
output_labels = os.path.join(TRAINING_DIR, name, 'output_labels.txt')
summaries_dir = os.path.join(TRAINING_DIR, name, 'retrain_logs')
model_dir = os.path.join(TRAINING_DIR, 'models')
bottleneck_dir = os.path.join(TRAINING_DIR, name, 'bottleneck')
final_tensor_name = f'{name}_result'
def create_image_lists(image_dir, testing_percentage, validation_percentage):
    """Build a list of training images from the file system.

    Scans the sub folders of ``image_dir`` and deterministically splits each
    label's images into stable training, testing and validation sets.

    Args:
      image_dir: String path to a folder containing subfolders of images.
      testing_percentage: Integer percentage of the images to reserve for tests.
      validation_percentage: Integer percentage of images reserved for validation.

    Returns:
      A dictionary with one entry per label subfolder, holding the subfolder
      name and the per-set image lists, or None if ``image_dir`` is missing.
    """
    if not gfile.Exists(image_dir):
        tf.logging.error("Image directory '" + image_dir + "' not found.")
        return None
    result = {}
    sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
    # gfile.Walk yields the root directory first; only its children are labels.
    for sub_dir in sub_dirs[1:]:
        dir_name = os.path.basename(sub_dir)
        if dir_name == image_dir:
            continue
        tf.logging.info("Looking for images in '" + dir_name + "'")
        file_list = [
            path
            for extension in ('jpg', 'jpeg', 'JPG', 'JPEG')
            for path in gfile.Glob(os.path.join(image_dir, dir_name, '*.' + extension))
        ]
        if not file_list:
            tf.logging.warning('No files found')
            continue
        if len(file_list) < 20:
            tf.logging.warning(
                'WARNING: Folder has less than 20 images, which may cause issues.')
        elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
            tf.logging.warning(
                'WARNING: Folder {} has more than {} images. Some images will '
                'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
        label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
        training_images = []
        testing_images = []
        validation_images = []
        for file_name in file_list:
            base_name = os.path.basename(file_name)
            # Ignore anything after '_nohash_' when hashing, so close
            # variations of the same photo always land in the same set.
            hash_name = re.sub(r'_nohash_.*$', '', file_name)
            # Hash the stable name to a percentage so set membership never
            # changes when more files are added later.
            hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
            percentage_hash = ((int(hash_name_hashed, 16) %
                                (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                               (100.0 / MAX_NUM_IMAGES_PER_CLASS))
            if percentage_hash < validation_percentage:
                validation_images.append(base_name)
            elif percentage_hash < (testing_percentage + validation_percentage):
                testing_images.append(base_name)
            else:
                training_images.append(base_name)
        result[label_name] = {
            'dir': dir_name,
            'training': training_images,
            'testing': testing_images,
            'validation': validation_images,
        }
    return result
def get_image_path(image_lists, label_name, index, image_dir, category):
    """Return a path to an image for a label at the given index.

    Args:
      image_lists: Dictionary of training images for each label.
      label_name: Label string we want to get an image for.
      index: Int offset of the image we want; wrapped modulo the number of
        images available for the label, so it can be arbitrarily large.
      image_dir: Root folder string of the subfolders containing the training
        images.
      category: Name string of set to pull images from - training, testing, or
        validation.

    Returns:
      File system path string to an image that meets the requested parameters.
    """
    if label_name not in image_lists:
        tf.logging.fatal('Label does not exist %s.', label_name)
    label_lists = image_lists[label_name]
    if category not in label_lists:
        tf.logging.fatal('Category does not exist %s.', category)
    category_list = label_lists[category]
    if not category_list:
        tf.logging.fatal('Label %s has no images in the category %s.',
                         label_name, category)
    # Wrap the index so arbitrarily large offsets still pick a valid image.
    base_name = category_list[index % len(category_list)]
    return os.path.join(image_dir, label_lists['dir'], base_name)
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
                        category, architecture):
    """Return the cache-file path of a bottleneck for a label at an index.

    Args:
      image_lists: Dictionary of training images for each label.
      label_name: Label string we want to get an image for.
      index: Integer offset of the image we want; wrapped modulo the number of
        images available for the label, so it can be arbitrarily large.
      bottleneck_dir: Folder string holding cached files of bottleneck values.
      category: Name string of set to pull images from - training, testing, or
        validation.
      architecture: The name of the model architecture.

    Returns:
      File system path string to an image that meets the requested parameters.
    """
    image_path = get_image_path(image_lists, label_name, index,
                                bottleneck_dir, category)
    # Per-architecture suffix so different models never share cache files.
    return '{}_{}.txt'.format(image_path, architecture)
def create_model_graph(model_info):
    """Create a Graph object from a saved GraphDef file.

    Args:
      model_info: Dictionary containing information about the model architecture.

    Returns:
      Graph holding the trained Inception network, plus its bottleneck and
      resized-input tensors.
    """
    with tf.Graph().as_default() as graph:
        # NOTE(review): model_dir is taken from the enclosing scope.
        model_path = os.path.join(model_dir, model_info['model_file_name'])
        with gfile.FastGFile(model_path, 'rb') as model_file:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(model_file.read())
            bottleneck_tensor, resized_input_tensor = tf.import_graph_def(
                graph_def,
                name='',
                return_elements=[
                    model_info['bottleneck_tensor_name'],
                    model_info['resized_input_tensor_name'],
                ])
    return graph, bottleneck_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
                            decoded_image_tensor, resized_input_tensor,
                            bottleneck_tensor):
    """Run inference on an image to extract the 'bottleneck' summary layer.

    Args:
      sess: Current active TensorFlow Session.
      image_data: String of raw JPEG data.
      image_data_tensor: Input data layer in the graph.
      decoded_image_tensor: Output of initial image resizing and preprocessing.
      resized_input_tensor: The input node of the recognition graph.
      bottleneck_tensor: Layer before the final softmax.

    Returns:
      Numpy array of bottleneck values.
    """
    # Decode/resize/rescale the JPEG first, then feed the pixels through the
    # recognition network and drop the singleton batch dimension.
    resized_values = sess.run(decoded_image_tensor,
                              {image_data_tensor: image_data})
    raw_bottleneck = sess.run(bottleneck_tensor,
                              {resized_input_tensor: resized_values})
    return np.squeeze(raw_bottleneck)
def maybe_download_and_extract(data_url):
    """Download and extract the pretrained model tar file.

    If the pretrained model we're using doesn't already exist, this function
    downloads it from the TensorFlow.org website (reporting fractional
    progress on the enclosing ``training_run``) and unpacks it into
    ``model_dir``.

    Args:
      data_url: Web location of the tar file containing the pretrained model.
    """
    dest_directory = model_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = data_url.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        training_run.progress = 0
        training_run.set_status('downloading')

        def _progress(count, block_size, total_size):
            # urlretrieve reporthook: persist fractional progress on the run
            # and echo a percentage to stdout.
            progress = float(count * block_size) / float(total_size)
            training_run.progress = progress
            training_run.save()
            sys.stdout.write('\r>> Downloading %s %.1f%%' %
                             (filename, progress * 100.0))
            sys.stdout.flush()

        filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        # Fix: tf.logging.info applies msg % args lazily, so the extra
        # positional arguments need matching placeholders in the format
        # string (the old call passed them with no placeholders at all).
        tf.logging.info('Successfully downloaded %s %d bytes.',
                        filename, statinfo.st_size)
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
    """Make sure the folder exists on disk.

    Args:
      dir_name: Path string to the folder we want to create.
    """
    if os.path.exists(dir_name):
        return
    os.makedirs(dir_name)
# NOTE(review): this dict is never read or written in the visible code —
# likely a leftover in-memory cache; candidate for removal. TODO confirm.
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor):
    """Compute a single bottleneck vector and write it to ``bottleneck_path``."""
    tf.logging.info('Creating bottleneck at ' + bottleneck_path)
    source_path = get_image_path(image_lists, label_name, index,
                                 image_dir, category)
    if not gfile.Exists(source_path):
        tf.logging.fatal('File does not exist %s', source_path)
    raw_jpeg = gfile.FastGFile(source_path, 'rb').read()
    try:
        values = run_bottleneck_on_image(
            sess, raw_jpeg, jpeg_data_tensor, decoded_image_tensor,
            resized_input_tensor, bottleneck_tensor)
    except Exception as err:
        raise RuntimeError('Error during processing file %s (%s)'
                           % (source_path, str(err)))
    # Cache as a comma-separated text file so later runs can skip inference.
    with open(bottleneck_path, 'w') as bottleneck_file:
        bottleneck_file.write(','.join(str(value) for value in values))
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
                             category, bottleneck_dir, jpeg_data_tensor,
                             decoded_image_tensor, resized_input_tensor,
                             bottleneck_tensor, architecture):
    """Retrieve or calculate bottleneck values for an image.

    If a cached version of the bottleneck data exists on-disk, return that,
    otherwise calculate the data and save it to disk for future use.

    Args:
      sess: The current active TensorFlow Session.
      image_lists: Dictionary of training images for each label.
      label_name: Label string we want to get an image for.
      index: Integer offset of the image we want; wrapped modulo the number of
        images available for the label, so it can be arbitrarily large.
      image_dir: Root folder string of the subfolders containing the training
        images.
      category: Name string of which set to pull images from - training,
        testing, or validation.
      bottleneck_dir: Folder string holding cached files of bottleneck values.
      jpeg_data_tensor: The tensor to feed loaded jpeg data into.
      decoded_image_tensor: The output of decoding and resizing the image.
      resized_input_tensor: The input node of the recognition graph.
      bottleneck_tensor: The output tensor for the bottleneck values.
      architecture: The name of the model architecture.

    Returns:
      Numpy array of values produced by the bottleneck layer for the image.
    """
    ensure_dir_exists(os.path.join(bottleneck_dir, image_lists[label_name]['dir']))
    bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
                                          bottleneck_dir, category, architecture)
    if not os.path.exists(bottleneck_path):
        create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                               image_dir, category, sess, jpeg_data_tensor,
                               decoded_image_tensor, resized_input_tensor,
                               bottleneck_tensor)
    with open(bottleneck_path, 'r') as bottleneck_file:
        bottleneck_string = bottleneck_file.read()
    try:
        return [float(x) for x in bottleneck_string.split(',')]
    except ValueError:
        # Corrupt cache entry: log, regenerate, and re-read once.
        tf.logging.warning('Invalid float found, recreating bottleneck')
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor)
    with open(bottleneck_path, 'r') as bottleneck_file:
        bottleneck_string = bottleneck_file.read()
    # Allow exceptions to propagate here, since they shouldn't happen after a
    # fresh creation.
    return [float(x) for x in bottleneck_string.split(',')]
def get_total_bottleneck_count(image_lists):
    """Return how many bottleneck files will be created in total.

    Args:
      image_lists: Dictionary of training images for each label, each entry
        holding 'training', 'testing' and 'validation' image lists.

    Returns:
      Integer count of images across all labels and all three sets.
    """
    # Sum list lengths directly instead of the old enumerate-and-increment
    # loop, which iterated every item just to count it.
    return sum(
        len(label_lists[category])
        for label_lists in image_lists.values()
        for category in ('training', 'testing', 'validation')
    )
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
                      jpeg_data_tensor, decoded_image_tensor,
                      resized_input_tensor, bottleneck_tensor, architecture):
    """Ensure all training, testing, and validation bottlenecks are cached.

    Because we're likely to read the same image multiple times, computing the
    bottleneck layer once per image up front and re-reading the cached value
    during training is a large speed-up. Progress is reported on the enclosing
    ``training_run``.

    Args:
      sess: The current active TensorFlow Session.
      image_lists: Dictionary of training images for each label.
      image_dir: Root folder string of the subfolders containing the training
        images.
      bottleneck_dir: Folder string holding cached files of bottleneck values.
      jpeg_data_tensor: Input tensor for jpeg data from file.
      decoded_image_tensor: The output of decoding and resizing the image.
      resized_input_tensor: The input node of the recognition graph.
      bottleneck_tensor: The penultimate output layer of the graph.
      architecture: The name of the model architecture.

    Returns:
      Nothing.
    """
    ensure_dir_exists(bottleneck_dir)
    training_run.progress = 0
    training_run.set_status('bottleneck')
    total_bottleneck_files = get_total_bottleneck_count(image_lists)
    done = 0
    for label_name, label_lists in image_lists.items():
        for category in ('training', 'testing', 'validation'):
            for index, _ in enumerate(label_lists[category]):
                get_or_create_bottleneck(
                    sess, image_lists, label_name, index, image_dir, category,
                    bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
                    resized_input_tensor, bottleneck_tensor, architecture)
                done += 1
                training_run.progress = done / total_bottleneck_files
                training_run.save()
                if done % 100 == 0:
                    tf.logging.info(str(done) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
                                  bottleneck_dir, image_dir, jpeg_data_tensor,
                                  decoded_image_tensor, resized_input_tensor,
                                  bottleneck_tensor, architecture):
    """Retrieve bottleneck values for cached images.

    Picks ``how_many`` random images from ``category`` (or every image in the
    category when ``how_many`` is negative) and returns their cached
    bottleneck vectors.

    Args:
      sess: Current TensorFlow Session.
      image_lists: Dictionary of training images for each label.
      how_many: If positive, a random sample of this size will be chosen.
        If negative, all bottlenecks will be retrieved.
      category: Name string of which set to pull from - training, testing, or
        validation.
      bottleneck_dir: Folder string holding cached files of bottleneck values.
      image_dir: Root folder string of the subfolders containing the training
        images.
      jpeg_data_tensor: The layer to feed jpeg image data into.
      decoded_image_tensor: The output of decoding and resizing the image.
      resized_input_tensor: The input node of the recognition graph.
      bottleneck_tensor: The bottleneck output layer of the CNN graph.
      architecture: The name of the model architecture.

    Returns:
      List of bottleneck arrays, their corresponding ground truths, and the
      relevant filenames.
    """
    class_count = len(image_lists.keys())
    bottlenecks = []
    ground_truths = []
    filenames = []

    def _collect(label_index, label_name, image_index):
        # Append one cached bottleneck plus its one-hot truth and filename.
        image_name = get_image_path(image_lists, label_name, image_index,
                                    image_dir, category)
        bottleneck = get_or_create_bottleneck(
            sess, image_lists, label_name, image_index, image_dir, category,
            bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
            resized_input_tensor, bottleneck_tensor, architecture)
        one_hot = np.zeros(class_count, dtype=np.float32)
        one_hot[label_index] = 1.0
        bottlenecks.append(bottleneck)
        ground_truths.append(one_hot)
        filenames.append(image_name)

    if how_many >= 0:
        # Random sample: pick a random label, then a random (wrapped) index.
        for _ in range(how_many):
            label_index = random.randrange(class_count)
            label_name = list(image_lists.keys())[label_index]
            _collect(label_index, label_name,
                     random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1))
    else:
        # Exhaustive: every image of every label in this category.
        for label_index, label_name in enumerate(image_lists.keys()):
            for image_index, _ in enumerate(image_lists[label_name][category]):
                _collect(label_index, label_name, image_index)
    return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
    sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
    distorted_image, resized_input_tensor, bottleneck_tensor):
  """Retrieves bottleneck values for training images, after distortions.

  When training with distortions (crops, scales, or flips) the cached
  bottlenecks cannot be reused, because the full model has to be recomputed
  for every distorted image. This picks random images from the requested
  category, pushes each through the distortion graph, and then through the
  full recognition graph to produce a fresh bottleneck for it.

  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: The integer number of bottleneck values to return.
    category: Name string of which set of images to fetch - training, testing,
    or validation.
    image_dir: Root folder string of the subfolders containing the training
    images.
    input_jpeg_tensor: The input layer we feed the image data to.
    distorted_image: The output node of the distortion graph.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.

  Returns:
    List of bottleneck arrays and their corresponding ground truths.
  """
  label_names = list(image_lists.keys())
  class_count = len(label_names)
  bottlenecks = []
  ground_truths = []
  for _ in range(how_many):
    label_index = random.randrange(class_count)
    label_name = label_names[label_index]
    image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
    image_path = get_image_path(image_lists, label_name, image_index, image_dir,
                                category)
    if not gfile.Exists(image_path):
      tf.logging.fatal('File does not exist %s', image_path)
    jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # The distorted image is materialized as a numpy array before inference
    # runs on it. That costs two memory copies and could be optimized away in
    # other implementations.
    distorted_image_data = sess.run(distorted_image,
                                    {input_jpeg_tensor: jpeg_data})
    bottleneck_values = sess.run(bottleneck_tensor,
                                 {resized_input_tensor: distorted_image_data})
    bottleneck_values = np.squeeze(bottleneck_values)
    one_hot_truth = np.zeros(class_count, dtype=np.float32)
    one_hot_truth[label_index] = 1.0
    bottlenecks.append(bottleneck_values)
    ground_truths.append(one_hot_truth)
  return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Reports whether any of the image-distortion options are active.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  percentages = (random_crop, random_scale, random_brightness)
  return flip_left_right or any(value != 0 for value in percentages)
def add_input_distortions(flip_left_right, random_crop, random_scale,
                          random_brightness, input_width, input_height,
                          input_depth, input_mean, input_std):
  """Creates the operations to apply the specified distortions.
  During training it can help to improve the results if we run the images
  through simple distortions like crops, scales, and flips. These reflect the
  kind of variations we expect in the real world, and so can help train the
  model to cope with natural data more effectively. Here we take the supplied
  parameters and construct a network of operations to apply them to an image.
  Cropping
  ~~~~~~~~
  Cropping is done by placing a bounding box at a random position in the full
  image. The cropping parameter controls the size of that box relative to the
  input image. If it's zero, then the box is the same size as the input and no
  cropping is performed. If the value is 50%, then the crop box will be half the
  width and height of the input. In a diagram it looks like this:
  <       width         >
  +---------------------+
  |                     |
  |   width - crop%     |
  |  <      >           |
  |  +------+           |
  |  |      |           |
  |  |      |           |
  |  |      |           |
  |  +------+           |
  |                     |
  |                     |
  +---------------------+
  Scaling
  ~~~~~~~
  Scaling is a lot like cropping, except that the bounding box is always
  centered and its size varies randomly within the given range. For example if
  the scale percentage is zero, then the bounding box is the same size as the
  input and no scaling is applied. If it's 50%, then the bounding box will be in
  a random range between half the width and height and full size.
  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
    crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
    input_width: Horizontal size of expected input image to model.
    input_height: Vertical size of expected input image to model.
    input_depth: How many channels the expected input image should have.
    input_mean: Pixel value that should be zero in the image for the graph.
    input_std: How much to divide the pixel values by before recognition.
  Returns:
    The jpeg input layer and the distorted result tensor.
  """
  # Decode raw JPEG bytes into a float image with a leading batch dimension,
  # since the resize op below expects a 4-D tensor.
  jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
  decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
  # Scale the image up before cropping: margin_scale reserves room for the
  # crop margin, resize_scale adds a random zoom in [1.0, 1 + scale%).
  margin_scale = 1.0 + (random_crop / 100.0)
  resize_scale = 1.0 + (random_scale / 100.0)
  margin_scale_value = tf.constant(margin_scale)
  resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
                                         minval=1.0,
                                         maxval=resize_scale)
  scale_value = tf.multiply(margin_scale_value, resize_scale_value)
  precrop_width = tf.multiply(scale_value, input_width)
  precrop_height = tf.multiply(scale_value, input_height)
  precrop_shape = tf.stack([precrop_height, precrop_width])
  precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
  precropped_image = tf.image.resize_bilinear(decoded_image_4d,
                                              precrop_shape_as_int)
  # Drop the batch dimension again so random_crop works on a single image.
  # NOTE(review): `squeeze_dims` is the pre-TF-1.x name of the `axis` kwarg.
  precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
  # Take a randomly-positioned crop of the final model input size.
  cropped_image = tf.random_crop(precropped_image_3d,
                                 [input_height, input_width, input_depth])
  if flip_left_right:
    flipped_image = tf.image.random_flip_left_right(cropped_image)
  else:
    flipped_image = cropped_image
  # Multiply pixel values by a random factor in
  # [1 - brightness%, 1 + brightness%).
  brightness_min = 1.0 - (random_brightness / 100.0)
  brightness_max = 1.0 + (random_brightness / 100.0)
  brightness_value = tf.random_uniform(tensor_shape.scalar(),
                                       minval=brightness_min,
                                       maxval=brightness_max)
  brightened_image = tf.multiply(flipped_image, brightness_value)
  # Normalize to the mean/std the recognition graph was trained with.
  offset_image = tf.subtract(brightened_image, input_mean)
  mul_image = tf.multiply(offset_image, 1.0 / input_std)
  distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')
  return jpeg_data, distort_result
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean_value = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean_value)
    with tf.name_scope('stddev'):
      squared_deviation = tf.square(var - mean_value)
      stddev_value = tf.sqrt(tf.reduce_mean(squared_deviation))
    tf.summary.scalar('stddev', stddev_value)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor,
                           bottleneck_tensor_size):
  """Adds a new softmax and fully-connected layer for training.
  We need to retrain the top layer to identify our new classes, so this function
  adds the right operations to the graph, along with some variables to hold the
  weights, and then sets up all the gradients for the backward pass.
  The set up for the softmax and fully-connected layers is based on:
  https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
  Args:
    class_count: Integer of how many categories of things we're trying to
    recognize.
    final_tensor_name: Name string for the new final node that produces results.
    bottleneck_tensor: The output of the main CNN graph.
    bottleneck_tensor_size: How many entries in the bottleneck vector.
  Returns:
    The tensors for the training and cross entropy results, and tensors for the
    bottleneck input and ground truth input.
  """
  with tf.name_scope('input'):
    # placeholder_with_default lets callers either feed cached bottlenecks or
    # fall through to the live bottleneck_tensor output.
    bottleneck_input = tf.placeholder_with_default(
        bottleneck_tensor,
        shape=[None, bottleneck_tensor_size],
        name='BottleneckInputPlaceholder')
    ground_truth_input = tf.placeholder(tf.float32,
                                        [None, class_count],
                                        name='GroundTruthInput')
  # Organizing the following ops as `final_training_ops` so they're easier
  # to see in TensorBoard
  layer_name = 'final_training_ops'
  with tf.name_scope(layer_name):
    with tf.name_scope('weights'):
      # Small random init (stddev=0.001) for the single new dense layer.
      initial_value = tf.truncated_normal(
          [bottleneck_tensor_size, class_count], stddev=0.001)
      layer_weights = tf.Variable(initial_value, name='final_weights')
      variable_summaries(layer_weights)
    with tf.name_scope('biases'):
      layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
      variable_summaries(layer_biases)
    with tf.name_scope('Wx_plus_b'):
      logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
      tf.summary.histogram('pre_activations', logits)
  final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
  tf.summary.histogram('activations', final_tensor)
  with tf.name_scope('cross_entropy'):
    # Softmax cross entropy computed from the raw logits, not final_tensor.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        labels=ground_truth_input, logits=logits)
    with tf.name_scope('total'):
      cross_entropy_mean = tf.reduce_mean(cross_entropy)
  tf.summary.scalar('cross_entropy', cross_entropy_mean)
  with tf.name_scope('train'):
    # NOTE(review): `learning_rate` is a module-level global here (the
    # upstream script read it from FLAGS) — confirm it is set before this
    # function is called.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_step = optimizer.minimize(cross_entropy_mean)
  return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
          final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      # A prediction is correct when the arg-max of the scores matches the
      # arg-max of the one-hot ground truth.
      prediction = tf.argmax(result_tensor, 1)
      truth = tf.argmax(ground_truth_tensor, 1)
      correct_prediction = tf.equal(prediction, truth)
    with tf.name_scope('accuracy'):
      correct_as_float = tf.cast(correct_prediction, tf.float32)
      evaluation_step = tf.reduce_mean(correct_as_float)
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction
def save_graph_to_file(sess, graph, graph_file_name):
  """Writes the graph, with variables frozen into constants, to disk.

  Args:
    sess: Active TensorFlow session holding the trained variable values.
    graph: The graph whose definition should be exported.
    graph_file_name: Path of the file to write the serialized GraphDef to.
  """
  # `final_tensor_name` is a module-level global naming the output node
  # to keep when pruning the graph.
  frozen_graph_def = graph_util.convert_variables_to_constants(
      sess, graph.as_graph_def(), [final_tensor_name])
  with gfile.FastGFile(graph_file_name, 'wb') as graph_file:
    graph_file.write(frozen_graph_def.SerializeToString())
def prepare_file_system():
  """Creates (or resets) the directories that training artifacts are written to."""
  # Start every run with a fresh TensorBoard summaries directory.
  if tf.gfile.Exists(summaries_dir):
    tf.gfile.DeleteRecursively(summaries_dir)
  tf.gfile.MakeDirs(summaries_dir)
  # The intermediate-graph directory is only needed when intermediate
  # results are being stored.
  if intermediate_store_frequency > 0:
    ensure_dir_exists(intermediate_output_graphs_dir)
def create_model_info(architecture):
"""Given the name of a model architecture, returns information about it.
There are different base image recognition pretrained models that can be
retrained using transfer learning, and this function translates from the name
of a model to the attributes that are needed to download and train with it.
Args:
architecture: Name of a model architecture.
Returns:
Dictionary of information about the model, or None if the name isn't
recognized
Raises:
ValueError: If architecture name is unknown.
"""
architecture = architecture.lower()
if architecture == 'inception_v3':
# pylint: disable=line-too-long
data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
bottleneck_tensor_name = 'pool_3/_reshape:0'
bottleneck_tensor_size = 2048
input_width = 299
input_height = 299
input_depth = 3
resized_input_tensor_name = 'Mul:0'
model_file_name = 'classify_image_graph_def.pb'
input_mean = 128
input_std = 128
elif architecture.startswith('mobilenet_'):
parts = architecture.split('_')
if len(parts) != 3 and len(parts) != 4:
tf.logging.error("Couldn't understand architecture name '%s'",
architecture)
return None
version_string = parts[1]
if (version_string != '1.0' and version_string != '0.75' and
version_string != '0.50' and version_string != '0.25'):
tf.logging.error(
""""The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',
but found '%s' for architecture '%s'""",
version_string, architecture)
return None
size_string = parts[2]
if (size_string != '224' and size_string != '192' and
size_string != '160' and size_string != '128'):
tf.logging.error(
"""The Mobilenet input size should be '224', '192', '160', or '128',
but found '%s' for architecture '%s'""",
size_string, architecture)
return None
if len(parts) == 3:
is_quantized = False
else:
if parts[3] != 'quantized':
tf.logging.error(
"Couldn't understand architecture suffix '%s' for '%s'", parts[3],
architecture)
return None
is_quantized = True
data_url = 'http://download.tensorflow.org/models/mobilenet_v1_'
data_url += version_string + '_' + size_string + '_frozen.tgz'
bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'
bottleneck_tensor_size = 1001
input_width = int(size_string)
input_height = int(size_string)
input_depth = 3
resized_input_tensor_name = 'input:0'
if is_quantized:
model_base_name = 'quantized_graph.pb'
else:
model_base_name = 'frozen_graph.pb'
model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string
model_file_name = os.path.join(model_dir_name, model_base_name)
input_mean = 127.5
input_std = 127.5
else:
tf.logging.error("Couldn't understand architecture name '%s'", architecture)
raise ValueError('Unknown architecture', architecture)
return {
'data_url': data_url,
'bottleneck_tensor_name': bottleneck_tensor_name,
'bottleneck_tensor_size': bottleneck_tensor_size,
'input_width': input_width,
'input_height': input_height,
'input_depth': input_depth,
'resized_input_tensor_name': resized_input_tensor_name,
'model_file_name': model_file_name,
'input_mean': input_mean,
'input_std': input_std,
}
def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,
                      input_std):
  """Adds operations that perform JPEG decoding and resizing to the graph.

  Args:
    input_width: Desired width of the image fed into the recognizer graph.
    input_height: Desired height of the image fed into the recognizer graph.
    input_depth: Desired channels of the image fed into the recognizer graph.
    input_mean: Pixel value that should be zero in the image for the graph.
    input_std: How much to divide the pixel values by before recognition.

  Returns:
    Tensors for the node to feed JPEG data into, and the output of the
    preprocessing steps.
  """
  jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
  decoded = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
  decoded_as_float = tf.cast(decoded, dtype=tf.float32)
  # Add a leading batch dimension so the resize op receives a 4-D tensor.
  batched_image = tf.expand_dims(decoded_as_float, 0)
  target_shape = tf.cast(tf.stack([input_height, input_width]),
                         dtype=tf.int32)
  resized_image = tf.image.resize_bilinear(batched_image, target_shape)
  # Shift and rescale pixel values to what the recognition graph expects.
  offset_image = tf.subtract(resized_image, input_mean)
  mul_image = tf.multiply(offset_image, 1.0 / input_std)
  return jpeg_data, mul_image
def main(_):
  """Runs the whole retraining pipeline: build, train, evaluate, export.

  Reads its configuration (image_dir, architecture, training_run, ...) from
  module-level globals rather than command-line FLAGS.

  Returns:
    True on success, or -1 if the architecture was not recognized or the
    image directory did not contain at least two class folders.
  """
  # Needed to make sure the logging output is visible.
  # See https://github.com/tensorflow/tensorflow/issues/3047
  tf.logging.set_verbosity(tf.logging.INFO)
  # Prepare necessary directories that can be used during training
  prepare_file_system()
  # Gather information about the model architecture we'll be using.
  model_info = create_model_info(architecture)
  if not model_info:
    tf.logging.error('Did not recognize architecture flag')
    return -1
  # Set up the pre-trained graph.
  maybe_download_and_extract(model_info['data_url'])
  graph, bottleneck_tensor, resized_image_tensor = (
      create_model_graph(model_info))
  # Look at the folder structure, and create lists of all the images.
  image_lists = create_image_lists(image_dir, testing_percentage,
                                   validation_percentage)
  class_count = len(image_lists.keys())
  if class_count == 0:
    tf.logging.error('No valid folders of images found at ' + image_dir)
    return -1
  if class_count == 1:
    tf.logging.error('Only one valid folder of images found at ' +
                     image_dir +
                     ' - multiple classes are needed for classification.')
    return -1
  # See if the command-line flags mean we're applying any distortions.
  do_distort_images = should_distort_images(
      flip_left_right, random_crop, random_scale,
      random_brightness)
  with tf.Session(graph=graph) as sess:
    # Set up the image decoding sub-graph.
    jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(
        model_info['input_width'], model_info['input_height'],
        model_info['input_depth'], model_info['input_mean'],
        model_info['input_std'])
    if do_distort_images:
      # We will be applying distortions, so setup the operations we'll need.
      (distorted_jpeg_data_tensor,
       distorted_image_tensor) = add_input_distortions(
           flip_left_right, random_crop, random_scale,
           random_brightness, model_info['input_width'],
           model_info['input_height'], model_info['input_depth'],
           model_info['input_mean'], model_info['input_std'])
    else:
      # We'll make sure we've calculated the 'bottleneck' image summaries and
      # cached them on disk.
      cache_bottlenecks(sess, image_lists, image_dir,
                        bottleneck_dir, jpeg_data_tensor,
                        decoded_image_tensor, resized_image_tensor,
                        bottleneck_tensor, architecture)
    # Add the new layer that we'll be training.
    (train_step, cross_entropy, bottleneck_input, ground_truth_input,
     final_tensor) = add_final_training_ops(
         len(image_lists.keys()), final_tensor_name, bottleneck_tensor,
         model_info['bottleneck_tensor_size'])
    # Create the operations we need to evaluate the accuracy of our new layer.
    evaluation_step, prediction = add_evaluation_step(
        final_tensor, ground_truth_input)
    # Merge all the summaries and write them out to the summaries_dir
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(summaries_dir + '/train',
                                         sess.graph)
    validation_writer = tf.summary.FileWriter(
        summaries_dir + '/validation')
    # Set up all our weights to their initial default values.
    init = tf.global_variables_initializer()
    sess.run(init)
    training_run.progress = 0
    training_run.set_status('training')
    # Run the training for as many cycles as requested on the command line.
    for i in range(how_many_training_steps):
      # NOTE(review): progress is stored as a 0..1 fraction here but set to
      # 100 after training below — confirm which scale consumers expect.
      training_run.progress = i / how_many_training_steps
      training_run.save()
      # Get a batch of input bottleneck values, either calculated fresh every
      # time with distortions applied, or from the cache stored on disk.
      if do_distort_images:
        (train_bottlenecks,
         train_ground_truth) = get_random_distorted_bottlenecks(
             sess, image_lists, train_batch_size, 'training',
             image_dir, distorted_jpeg_data_tensor,
             distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
      else:
        (train_bottlenecks,
         train_ground_truth, _) = get_random_cached_bottlenecks(
             sess, image_lists, train_batch_size, 'training',
             bottleneck_dir, image_dir, jpeg_data_tensor,
             decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
             architecture)
      # Feed the bottlenecks and ground truth into the graph, and run a training
      # step. Capture training summaries for TensorBoard with the `merged` op.
      train_summary, _ = sess.run(
          [merged, train_step],
          feed_dict={bottleneck_input: train_bottlenecks,
                     ground_truth_input: train_ground_truth})
      train_writer.add_summary(train_summary, i)
      # Every so often, print out how well the graph is training.
      is_last_step = (i + 1 == how_many_training_steps)
      if (i % eval_step_interval) == 0 or is_last_step:
        train_accuracy, cross_entropy_value = sess.run(
            [evaluation_step, cross_entropy],
            feed_dict={bottleneck_input: train_bottlenecks,
                       ground_truth_input: train_ground_truth})
        tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
                        (datetime.now(), i, train_accuracy * 100))
        tf.logging.info('%s: Step %d: Cross entropy = %f' %
                        (datetime.now(), i, cross_entropy_value))
        validation_bottlenecks, validation_ground_truth, _ = (
            get_random_cached_bottlenecks(
                sess, image_lists, validation_batch_size, 'validation',
                bottleneck_dir, image_dir, jpeg_data_tensor,
                decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
                architecture))
        # Run a validation step and capture training summaries for TensorBoard
        # with the `merged` op.
        validation_summary, validation_accuracy = sess.run(
            [merged, evaluation_step],
            feed_dict={bottleneck_input: validation_bottlenecks,
                       ground_truth_input: validation_ground_truth})
        validation_writer.add_summary(validation_summary, i)
        tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
                        (datetime.now(), i, validation_accuracy * 100,
                         len(validation_bottlenecks)))
      # Store intermediate results
      intermediate_frequency = intermediate_store_frequency
      if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
          and i > 0):
        intermediate_file_name = (intermediate_output_graphs_dir +
                                  'intermediate_' + str(i) + '.pb')
        tf.logging.info('Save intermediate result to : ' +
                        intermediate_file_name)
        save_graph_to_file(sess, graph, intermediate_file_name)
    # We've completed all our training, so run a final test evaluation on
    # some new images we haven't used before.
    test_bottlenecks, test_ground_truth, test_filenames = (
        get_random_cached_bottlenecks(
            sess, image_lists, test_batch_size, 'testing',
            bottleneck_dir, image_dir, jpeg_data_tensor,
            decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
            architecture))
    test_accuracy, predictions = sess.run(
        [evaluation_step, prediction],
        feed_dict={bottleneck_input: test_bottlenecks,
                   ground_truth_input: test_ground_truth})
    tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
                    (test_accuracy * 100, len(test_bottlenecks)))
    # set final accuracy
    training_run.progress = 100
    training_run.accuracy = test_accuracy
    training_run.save()
    if print_misclassified_test_images:
      tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
      for i, test_filename in enumerate(test_filenames):
        if predictions[i] != test_ground_truth[i].argmax():
          tf.logging.info('%70s  %s' %
                          (test_filename,
                           list(image_lists.keys())[predictions[i]]))
    # Write out the trained graph and labels with the weights stored as
    # constants.
    save_graph_to_file(sess, graph, output_graph)
    with gfile.FastGFile(output_labels, 'w') as f:
      f.write('\n'.join(image_lists.keys()) + '\n')
  # BUG FIX: the original ended with tf.app.run(main=main, argv=[sys.argv[0]])
  # right here, inside main() itself.  tf.app.run invokes the function it is
  # given, so a completed training run would immediately re-enter main() and
  # recurse/retrain indefinitely.  The call has been removed.
  return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='/tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--intermediate_output_graphs_dir',
type=str,
default='/tmp/intermediate_graph/',
help='Where to save the intermediate graphs.'
)
parser.add_argument(
'--intermediate_store_frequency',
type=int,
default=0,
help="""\
How many steps to store intermediate graph. If "0" then will not
store.\
"""
)
parser.add_argument(
'--output_labels',
type=str,
default='/tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='/tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
parser.add_argument(
'--architecture',
type=str,
default='inception_v3',
help="""\
Which model architecture to use. 'inception_v3' is the most accurate, but
also the slowest. For faster or smaller models, chose a MobileNet with the
form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,
'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224
pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much
less accurate, but smaller and faster network that's 920 KB on disk and
takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.\
""")
| [
"sys.stdout.write",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.gfile.Exists",
"tensorflow.python.util.compat.as_bytes",
"argparse.ArgumentParser",
"tensorflow.logging.info",
"tensorflow.logging.error",
"tensorflow.logging.warning",
"tensorflow.python.platform.gfile.Walk",
"tensorflo... | [((57959, 58000), 'tensorflow.app.run', 'tf.app.run', ([], {'main': 'main', 'argv': '[sys.argv[0]]'}), '(main=main, argv=[sys.argv[0]])\n', (57969, 58000), True, 'import tensorflow as tf\n'), ((58058, 58083), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (58081, 58083), False, 'import argparse\n'), ((14689, 14718), 'numpy.squeeze', 'np.squeeze', (['bottleneck_values'], {}), '(bottleneck_values)\n', (14699, 14718), True, 'import numpy as np\n'), ((16835, 16895), 'tensorflow.logging.info', 'tf.logging.info', (["('Creating bottleneck at ' + bottleneck_path)"], {}), "('Creating bottleneck at ' + bottleneck_path)\n", (16850, 16895), True, 'import tensorflow as tf\n'), ((34697, 34746), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {'name': '"""DistortJPGInput"""'}), "(tf.string, name='DistortJPGInput')\n", (34711, 34746), True, 'import tensorflow as tf\n'), ((34771, 34824), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['jpeg_data'], {'channels': 'input_depth'}), '(jpeg_data, channels=input_depth)\n', (34791, 34824), True, 'import tensorflow as tf\n'), ((34858, 34898), 'tensorflow.cast', 'tf.cast', (['decoded_image'], {'dtype': 'tf.float32'}), '(decoded_image, dtype=tf.float32)\n', (34865, 34898), True, 'import tensorflow as tf\n'), ((34926, 34967), 'tensorflow.expand_dims', 'tf.expand_dims', (['decoded_image_as_float', '(0)'], {}), '(decoded_image_as_float, 0)\n', (34940, 34967), True, 'import tensorflow as tf\n'), ((35100, 35125), 'tensorflow.constant', 'tf.constant', (['margin_scale'], {}), '(margin_scale)\n', (35111, 35125), True, 'import tensorflow as tf\n'), ((35345, 35396), 'tensorflow.multiply', 'tf.multiply', (['margin_scale_value', 'resize_scale_value'], {}), '(margin_scale_value, resize_scale_value)\n', (35356, 35396), True, 'import tensorflow as tf\n'), ((35421, 35458), 'tensorflow.multiply', 'tf.multiply', (['scale_value', 'input_width'], {}), '(scale_value, input_width)\n', (35432, 35458), 
True, 'import tensorflow as tf\n'), ((35484, 35522), 'tensorflow.multiply', 'tf.multiply', (['scale_value', 'input_height'], {}), '(scale_value, input_height)\n', (35495, 35522), True, 'import tensorflow as tf\n'), ((35547, 35588), 'tensorflow.stack', 'tf.stack', (['[precrop_height, precrop_width]'], {}), '([precrop_height, precrop_width])\n', (35555, 35588), True, 'import tensorflow as tf\n'), ((35620, 35658), 'tensorflow.cast', 'tf.cast', (['precrop_shape'], {'dtype': 'tf.int32'}), '(precrop_shape, dtype=tf.int32)\n', (35627, 35658), True, 'import tensorflow as tf\n'), ((35686, 35750), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['decoded_image_4d', 'precrop_shape_as_int'], {}), '(decoded_image_4d, precrop_shape_as_int)\n', (35710, 35750), True, 'import tensorflow as tf\n'), ((35833, 35879), 'tensorflow.squeeze', 'tf.squeeze', (['precropped_image'], {'squeeze_dims': '[0]'}), '(precropped_image, squeeze_dims=[0])\n', (35843, 35879), True, 'import tensorflow as tf\n'), ((35904, 35981), 'tensorflow.random_crop', 'tf.random_crop', (['precropped_image_3d', '[input_height, input_width, input_depth]'], {}), '(precropped_image_3d, [input_height, input_width, input_depth])\n', (35918, 35981), True, 'import tensorflow as tf\n'), ((36529, 36573), 'tensorflow.multiply', 'tf.multiply', (['flipped_image', 'brightness_value'], {}), '(flipped_image, brightness_value)\n', (36540, 36573), True, 'import tensorflow as tf\n'), ((36597, 36638), 'tensorflow.subtract', 'tf.subtract', (['brightened_image', 'input_mean'], {}), '(brightened_image, input_mean)\n', (36608, 36638), True, 'import tensorflow as tf\n'), ((36659, 36701), 'tensorflow.multiply', 'tf.multiply', (['offset_image', '(1.0 / input_std)'], {}), '(offset_image, 1.0 / input_std)\n', (36670, 36701), True, 'import tensorflow as tf\n'), ((36727, 36777), 'tensorflow.expand_dims', 'tf.expand_dims', (['mul_image', '(0)'], {'name': '"""DistortResult"""'}), "(mul_image, 0, name='DistortResult')\n", (36741, 
36777), True, 'import tensorflow as tf\n'), ((39863, 39908), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {'name': 'final_tensor_name'}), '(logits, name=final_tensor_name)\n', (39876, 39908), True, 'import tensorflow as tf\n'), ((39917, 39966), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""activations"""', 'final_tensor'], {}), "('activations', final_tensor)\n", (39937, 39966), True, 'import tensorflow as tf\n'), ((40256, 40310), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cross_entropy"""', 'cross_entropy_mean'], {}), "('cross_entropy', cross_entropy_mean)\n", (40273, 40310), True, 'import tensorflow as tf\n'), ((41407, 41453), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'evaluation_step'], {}), "('accuracy', evaluation_step)\n", (41424, 41453), True, 'import tensorflow as tf\n'), ((41932, 41962), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['summaries_dir'], {}), '(summaries_dir)\n', (41947, 41962), True, 'import tensorflow as tf\n'), ((42026, 42058), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['summaries_dir'], {}), '(summaries_dir)\n', (42043, 42058), True, 'import tensorflow as tf\n'), ((47262, 47310), 'tensorflow.placeholder', 'tf.placeholder', (['tf.string'], {'name': '"""DecodeJPGInput"""'}), "(tf.string, name='DecodeJPGInput')\n", (47276, 47310), True, 'import tensorflow as tf\n'), ((47335, 47388), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['jpeg_data'], {'channels': 'input_depth'}), '(jpeg_data, channels=input_depth)\n', (47355, 47388), True, 'import tensorflow as tf\n'), ((47422, 47462), 'tensorflow.cast', 'tf.cast', (['decoded_image'], {'dtype': 'tf.float32'}), '(decoded_image, dtype=tf.float32)\n', (47429, 47462), True, 'import tensorflow as tf\n'), ((47490, 47531), 'tensorflow.expand_dims', 'tf.expand_dims', (['decoded_image_as_float', '(0)'], {}), '(decoded_image_as_float, 0)\n', (47504, 47531), True, 'import tensorflow as tf\n'), ((47555, 47592), 
'tensorflow.stack', 'tf.stack', (['[input_height, input_width]'], {}), '([input_height, input_width])\n', (47563, 47592), True, 'import tensorflow as tf\n'), ((47623, 47660), 'tensorflow.cast', 'tf.cast', (['resize_shape'], {'dtype': 'tf.int32'}), '(resize_shape, dtype=tf.int32)\n', (47630, 47660), True, 'import tensorflow as tf\n'), ((47685, 47748), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['decoded_image_4d', 'resize_shape_as_int'], {}), '(decoded_image_4d, resize_shape_as_int)\n', (47709, 47748), True, 'import tensorflow as tf\n'), ((47821, 47859), 'tensorflow.subtract', 'tf.subtract', (['resized_image', 'input_mean'], {}), '(resized_image, input_mean)\n', (47832, 47859), True, 'import tensorflow as tf\n'), ((47880, 47922), 'tensorflow.multiply', 'tf.multiply', (['offset_image', '(1.0 / input_std)'], {}), '(offset_image, 1.0 / input_std)\n', (47891, 47922), True, 'import tensorflow as tf\n'), ((48113, 48154), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (48137, 48154), True, 'import tensorflow as tf\n'), ((6467, 6490), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['image_dir'], {}), '(image_dir)\n', (6479, 6490), False, 'from tensorflow.python.platform import gfile\n'), ((6504, 6570), 'tensorflow.logging.error', 'tf.logging.error', (['("Image directory \'" + image_dir + "\' not found.")'], {}), '("Image directory \'" + image_dir + "\' not found.")\n', (6520, 6570), True, 'import tensorflow as tf\n'), ((7082, 7141), 'tensorflow.logging.info', 'tf.logging.info', (['("Looking for images in \'" + dir_name + "\'")'], {}), '("Looking for images in \'" + dir_name + "\'")\n', (7097, 7141), True, 'import tensorflow as tf\n'), ((10798, 10854), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""Label does not exist %s."""', 'label_name'], {}), "('Label does not exist %s.', label_name)\n", (10814, 10854), True, 'import tensorflow as tf\n'), ((10953, 11010), 
'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""Category does not exist %s."""', 'category'], {}), "('Category does not exist %s.', category)\n", (10969, 11010), True, 'import tensorflow as tf\n'), ((11099, 11187), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""Label %s has no images in the category %s."""', 'label_name', 'category'], {}), "('Label %s has no images in the category %s.', label_name,\n category)\n", (11115, 11187), True, 'import tensorflow as tf\n'), ((15889, 15946), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['data_url', 'filepath', '_progress'], {}), '(data_url, filepath, _progress)\n', (15915, 15946), False, 'from six.moves import urllib\n'), ((16020, 16105), 'tensorflow.logging.info', 'tf.logging.info', (['"""Successfully downloaded"""', 'filename', 'statinfo.st_size', '"""bytes."""'], {}), "('Successfully downloaded', filename, statinfo.st_size, 'bytes.'\n )\n", (16035, 16105), True, 'import tensorflow as tf\n'), ((17036, 17060), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['image_path'], {}), '(image_path)\n', (17048, 17060), False, 'from tensorflow.python.platform import gfile\n'), ((17074, 17128), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""File does not exist %s"""', 'image_path'], {}), "('File does not exist %s', image_path)\n", (17090, 17128), True, 'import tensorflow as tf\n'), ((29688, 29717), 'random.randrange', 'random.randrange', (['class_count'], {}), '(class_count)\n', (29704, 29717), False, 'import random\n'), ((29807, 29853), 'random.randrange', 'random.randrange', (['(MAX_NUM_IMAGES_PER_CLASS + 1)'], {}), '(MAX_NUM_IMAGES_PER_CLASS + 1)\n', (29823, 29853), False, 'import random\n'), ((30725, 30754), 'numpy.squeeze', 'np.squeeze', (['bottleneck_values'], {}), '(bottleneck_values)\n', (30735, 30754), True, 'import numpy as np\n'), ((30782, 30821), 'numpy.zeros', 'np.zeros', (['class_count'], {'dtype': 'np.float32'}), '(class_count, dtype=np.float32)\n', (30790, 
30821), True, 'import numpy as np\n'), ((35173, 35194), 'tensorflow.python.framework.tensor_shape.scalar', 'tensor_shape.scalar', ([], {}), '()\n', (35192, 35194), False, 'from tensorflow.python.framework import tensor_shape\n'), ((36077, 36123), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['cropped_image'], {}), '(cropped_image)\n', (36108, 36123), True, 'import tensorflow as tf\n'), ((36343, 36364), 'tensorflow.python.framework.tensor_shape.scalar', 'tensor_shape.scalar', ([], {}), '()\n', (36362, 36364), False, 'from tensorflow.python.framework import tensor_shape\n'), ((36951, 36977), 'tensorflow.name_scope', 'tf.name_scope', (['"""summaries"""'], {}), "('summaries')\n", (36964, 36977), True, 'import tensorflow as tf\n'), ((36998, 37017), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['var'], {}), '(var)\n', (37012, 37017), True, 'import tensorflow as tf\n'), ((37030, 37061), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean"""', 'mean'], {}), "('mean', mean)\n", (37047, 37061), True, 'import tensorflow as tf\n'), ((37188, 37223), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""stddev"""', 'stddev'], {}), "('stddev', stddev)\n", (37205, 37223), True, 'import tensorflow as tf\n'), ((37350, 37388), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'var'], {}), "('histogram', var)\n", (37370, 37388), True, 'import tensorflow as tf\n'), ((38542, 38564), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", (38555, 38564), True, 'import tensorflow as tf\n'), ((38597, 38720), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['bottleneck_tensor'], {'shape': '[None, bottleneck_tensor_size]', 'name': '"""BottleneckInputPlaceholder"""'}), "(bottleneck_tensor, shape=[None,\n bottleneck_tensor_size], name='BottleneckInputPlaceholder')\n", (38624, 38720), True, 'import tensorflow as tf\n'), ((38800, 38872), 'tensorflow.placeholder', 
'tf.placeholder', (['tf.float32', '[None, class_count]'], {'name': '"""GroundTruthInput"""'}), "(tf.float32, [None, class_count], name='GroundTruthInput')\n", (38814, 38872), True, 'import tensorflow as tf\n'), ((39138, 39163), 'tensorflow.name_scope', 'tf.name_scope', (['layer_name'], {}), '(layer_name)\n', (39151, 39163), True, 'import tensorflow as tf\n'), ((39981, 40011), 'tensorflow.name_scope', 'tf.name_scope', (['"""cross_entropy"""'], {}), "('cross_entropy')\n", (39994, 40011), True, 'import tensorflow as tf\n'), ((40041, 40127), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'ground_truth_input', 'logits': 'logits'}), '(labels=ground_truth_input, logits=\n logits)\n', (40080, 40127), True, 'import tensorflow as tf\n'), ((40325, 40347), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (40338, 40347), True, 'import tensorflow as tf\n'), ((40373, 40421), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (40406, 40421), True, 'import tensorflow as tf\n'), ((41013, 41038), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (41026, 41038), True, 'import tensorflow as tf\n'), ((41700, 41738), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['graph_file_name', '"""wb"""'], {}), "(graph_file_name, 'wb')\n", (41715, 41738), False, 'from tensorflow.python.platform import gfile\n'), ((41976, 42017), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['summaries_dir'], {}), '(summaries_dir)\n', (42002, 42017), True, 'import tensorflow as tf\n'), ((48427, 48482), 'tensorflow.logging.error', 'tf.logging.error', (['"""Did not recognize architecture flag"""'], {}), "('Did not recognize architecture flag')\n", (48443, 48482), True, 'import tensorflow as tf\n'), ((49008, 49076), 'tensorflow.logging.error', 'tf.logging.error', (["('No 
valid folders of images found at ' + image_dir)"], {}), "('No valid folders of images found at ' + image_dir)\n", (49024, 49076), True, 'import tensorflow as tf\n'), ((49140, 49272), 'tensorflow.logging.error', 'tf.logging.error', (["('Only one valid folder of images found at ' + image_dir +\n ' - multiple classes are needed for classification.')"], {}), "('Only one valid folder of images found at ' + image_dir +\n ' - multiple classes are needed for classification.')\n", (49156, 49272), True, 'import tensorflow as tf\n'), ((49579, 49602), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (49589, 49602), True, 'import tensorflow as tf\n'), ((51469, 51491), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (51489, 51491), True, 'import tensorflow as tf\n'), ((51519, 51578), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(summaries_dir + '/train')", 'sess.graph'], {}), "(summaries_dir + '/train', sess.graph)\n", (51540, 51578), True, 'import tensorflow as tf\n'), ((51661, 51713), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(summaries_dir + '/validation')"], {}), "(summaries_dir + '/validation')\n", (51682, 51713), True, 'import tensorflow as tf\n'), ((51821, 51854), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (51852, 51854), True, 'import tensorflow as tf\n'), ((6649, 6670), 'tensorflow.python.platform.gfile.Walk', 'gfile.Walk', (['image_dir'], {}), '(image_dir)\n', (6659, 6670), False, 'from tensorflow.python.platform import gfile\n'), ((7365, 7401), 'tensorflow.logging.warning', 'tf.logging.warning', (['"""No files found"""'], {}), "('No files found')\n", (7383, 7401), True, 'import tensorflow as tf\n'), ((7479, 7570), 'tensorflow.logging.warning', 'tf.logging.warning', (['"""WARNING: Folder has less than 20 images, which may cause issues."""'], {}), "(\n 'WARNING: Folder has less than 20 images, which may cause issues.')\n", 
(7497, 7570), True, 'import tensorflow as tf\n'), ((8532, 8568), 're.sub', 're.sub', (['"""_nohash_.*$"""', '""""""', 'file_name'], {}), "('_nohash_.*$', '', file_name)\n", (8538, 8568), False, 'import re\n'), ((12978, 13011), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['model_path', '"""rb"""'], {}), "(model_path, 'rb')\n", (12993, 13011), False, 'from tensorflow.python.platform import gfile\n'), ((13046, 13059), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (13057, 13059), True, 'import tensorflow as tf\n'), ((13171, 13312), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""', 'return_elements': "[model_info['bottleneck_tensor_name'], model_info['resized_input_tensor_name']]"}), "(graph_def, name='', return_elements=[model_info[\n 'bottleneck_tensor_name'], model_info['resized_input_tensor_name']])\n", (13190, 13312), True, 'import tensorflow as tf\n'), ((15716, 15793), 'sys.stdout.write', 'sys.stdout.write', (["('\\r>> Downloading %s %.1f%%' % (filename, progress * 100.0))"], {}), "('\\r>> Downloading %s %.1f%%' % (filename, progress * 100.0))\n", (15732, 15793), False, 'import sys\n'), ((15843, 15861), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15859, 15861), False, 'import sys\n'), ((16137, 16167), 'tarfile.open', 'tarfile.open', (['filepath', '"""r:gz"""'], {}), "(filepath, 'r:gz')\n", (16149, 16167), False, 'import tarfile\n'), ((17150, 17183), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['image_path', '"""rb"""'], {}), "(image_path, 'rb')\n", (17165, 17183), False, 'from tensorflow.python.platform import gfile\n'), ((20410, 20474), 'tensorflow.logging.warning', 'tf.logging.warning', (['"""Invalid float found, recreating bottleneck"""'], {}), "('Invalid float found, recreating bottleneck')\n", (20428, 20474), True, 'import tensorflow as tf\n'), ((26156, 26185), 'random.randrange', 'random.randrange', (['class_count'], {}), '(class_count)\n', (26172, 
26185), False, 'import random\n'), ((26283, 26329), 'random.randrange', 'random.randrange', (['(MAX_NUM_IMAGES_PER_CLASS + 1)'], {}), '(MAX_NUM_IMAGES_PER_CLASS + 1)\n', (26299, 26329), False, 'import random\n'), ((26799, 26838), 'numpy.zeros', 'np.zeros', (['class_count'], {'dtype': 'np.float32'}), '(class_count, dtype=np.float32)\n', (26807, 26838), True, 'import numpy as np\n'), ((30012, 30036), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['image_path'], {}), '(image_path)\n', (30024, 30036), False, 'from tensorflow.python.platform import gfile\n'), ((30054, 30108), 'tensorflow.logging.fatal', 'tf.logging.fatal', (['"""File does not exist %s"""', 'image_path'], {}), "('File does not exist %s', image_path)\n", (30070, 30108), True, 'import tensorflow as tf\n'), ((37079, 37102), 'tensorflow.name_scope', 'tf.name_scope', (['"""stddev"""'], {}), "('stddev')\n", (37092, 37102), True, 'import tensorflow as tf\n'), ((37261, 37279), 'tensorflow.reduce_max', 'tf.reduce_max', (['var'], {}), '(var)\n', (37274, 37279), True, 'import tensorflow as tf\n'), ((37318, 37336), 'tensorflow.reduce_min', 'tf.reduce_min', (['var'], {}), '(var)\n', (37331, 37336), True, 'import tensorflow as tf\n'), ((39182, 39206), 'tensorflow.name_scope', 'tf.name_scope', (['"""weights"""'], {}), "('weights')\n", (39195, 39206), True, 'import tensorflow as tf\n'), ((39240, 39312), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[bottleneck_tensor_size, class_count]'], {'stddev': '(0.001)'}), '([bottleneck_tensor_size, class_count], stddev=0.001)\n', (39259, 39312), True, 'import tensorflow as tf\n'), ((39367, 39415), 'tensorflow.Variable', 'tf.Variable', (['initial_value'], {'name': '"""final_weights"""'}), "(initial_value, name='final_weights')\n", (39378, 39415), True, 'import tensorflow as tf\n'), ((39484, 39507), 'tensorflow.name_scope', 'tf.name_scope', (['"""biases"""'], {}), "('biases')\n", (39497, 39507), True, 'import tensorflow as tf\n'), ((39664, 39690), 
'tensorflow.name_scope', 'tf.name_scope', (['"""Wx_plus_b"""'], {}), "('Wx_plus_b')\n", (39677, 39690), True, 'import tensorflow as tf\n'), ((39791, 39838), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""pre_activations"""', 'logits'], {}), "('pre_activations', logits)\n", (39811, 39838), True, 'import tensorflow as tf\n'), ((40157, 40179), 'tensorflow.name_scope', 'tf.name_scope', (['"""total"""'], {}), "('total')\n", (40170, 40179), True, 'import tensorflow as tf\n'), ((40218, 40247), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (40232, 40247), True, 'import tensorflow as tf\n'), ((41057, 41092), 'tensorflow.name_scope', 'tf.name_scope', (['"""correct_prediction"""'], {}), "('correct_prediction')\n", (41070, 41092), True, 'import tensorflow as tf\n'), ((41123, 41150), 'tensorflow.argmax', 'tf.argmax', (['result_tensor', '(1)'], {}), '(result_tensor, 1)\n', (41132, 41150), True, 'import tensorflow as tf\n'), ((41282, 41307), 'tensorflow.name_scope', 'tf.name_scope', (['"""accuracy"""'], {}), "('accuracy')\n", (41295, 41307), True, 'import tensorflow as tf\n'), ((45833, 45909), 'tensorflow.logging.error', 'tf.logging.error', (['"""Couldn\'t understand architecture name \'%s\'"""', 'architecture'], {}), '("Couldn\'t understand architecture name \'%s\'", architecture)\n', (45849, 45909), True, 'import tensorflow as tf\n'), ((57268, 57320), 'tensorflow.logging.info', 'tf.logging.info', (['"""=== MISCLASSIFIED TEST IMAGES ==="""'], {}), "('=== MISCLASSIFIED TEST IMAGES ===')\n", (57283, 57320), True, 'import tensorflow as tf\n'), ((57834, 57869), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['output_labels', '"""w"""'], {}), "(output_labels, 'w')\n", (57849, 57869), False, 'from tensorflow.python.platform import gfile\n'), ((7296, 7317), 'tensorflow.python.platform.gfile.Glob', 'gfile.Glob', (['file_glob'], {}), '(file_glob)\n', (7306, 7317), False, 'from tensorflow.python.platform import 
gfile\n'), ((12847, 12857), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (12855, 12857), True, 'import tensorflow as tf\n'), ((27773, 27812), 'numpy.zeros', 'np.zeros', (['class_count'], {'dtype': 'np.float32'}), '(class_count, dtype=np.float32)\n', (27781, 27812), True, 'import numpy as np\n'), ((30133, 30166), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['image_path', '"""rb"""'], {}), "(image_path, 'rb')\n", (30148, 30166), False, 'from tensorflow.python.platform import gfile\n'), ((39552, 39575), 'tensorflow.zeros', 'tf.zeros', (['[class_count]'], {}), '([class_count])\n', (39560, 39575), True, 'import tensorflow as tf\n'), ((39717, 39759), 'tensorflow.matmul', 'tf.matmul', (['bottleneck_input', 'layer_weights'], {}), '(bottleneck_input, layer_weights)\n', (39726, 39759), True, 'import tensorflow as tf\n'), ((41230, 41263), 'tensorflow.argmax', 'tf.argmax', (['ground_truth_tensor', '(1)'], {}), '(ground_truth_tensor, 1)\n', (41239, 41263), True, 'import tensorflow as tf\n'), ((41358, 41397), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (41365, 41397), True, 'import tensorflow as tf\n'), ((43629, 43705), 'tensorflow.logging.error', 'tf.logging.error', (['"""Couldn\'t understand architecture name \'%s\'"""', 'architecture'], {}), '("Couldn\'t understand architecture name \'%s\'", architecture)\n', (43645, 43705), True, 'import tensorflow as tf\n'), ((43974, 44153), 'tensorflow.logging.error', 'tf.logging.error', (['""""The Mobilenet version should be \'1.0\', \'0.75\', \'0.50\', or \'0.25\',\n but found \'%s\' for architecture \'%s\'"""', 'version_string', 'architecture'], {}), '(\n """"The Mobilenet version should be \'1.0\', \'0.75\', \'0.50\', or \'0.25\',\n but found \'%s\' for architecture \'%s\'"""\n , version_string, architecture)\n', (43990, 44153), True, 'import tensorflow as tf\n'), ((44402, 44576), 'tensorflow.logging.error', 'tf.logging.error', (['"""The 
Mobilenet input size should be \'224\', \'192\', \'160\', or \'128\',\n but found \'%s\' for architecture \'%s\'"""', 'size_string', 'architecture'], {}), '(\n """The Mobilenet input size should be \'224\', \'192\', \'160\', or \'128\',\n but found \'%s\' for architecture \'%s\'"""\n , size_string, architecture)\n', (44418, 44576), True, 'import tensorflow as tf\n'), ((55988, 56062), 'tensorflow.logging.info', 'tf.logging.info', (["('Save intermediate result to : ' + intermediate_file_name)"], {}), "('Save intermediate result to : ' + intermediate_file_name)\n", (56003, 56062), True, 'import tensorflow as tf\n'), ((37152, 37173), 'tensorflow.square', 'tf.square', (['(var - mean)'], {}), '(var - mean)\n', (37161, 37173), True, 'import tensorflow as tf\n'), ((44787, 44888), 'tensorflow.logging.error', 'tf.logging.error', (['"""Couldn\'t understand architecture suffix \'%s\' for \'%s\'"""', 'parts[3]', 'architecture'], {}), '("Couldn\'t understand architecture suffix \'%s\' for \'%s\'",\n parts[3], architecture)\n', (44803, 44888), True, 'import tensorflow as tf\n'), ((9143, 9169), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['hash_name'], {}), '(hash_name)\n', (9158, 9169), False, 'from tensorflow.python.util import compat\n'), ((54232, 54246), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (54244, 54246), False, 'from datetime import datetime\n'), ((54383, 54397), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (54395, 54397), False, 'from datetime import datetime\n'), ((55462, 55476), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (55474, 55476), False, 'from datetime import datetime\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for GNN."""
import os
from models import GAT
from models import GCN
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
def build_model(model_name, num_layers, hidden_dim, num_classes, dropout_rate,
                num_heads, sparse):
  """Create a GNN model and initialize parameter weights.

  Args:
    model_name: Either 'gcn' or 'gat'; selects the architecture.
    num_layers: Number of graph layers.
    hidden_dim: Sequence of per-layer hidden sizes; entries are coerced to int.
    num_classes: Number of output classes.
    dropout_rate: Dropout rate passed to the model.
    num_heads: Number of attention heads (used by GAT only).
    sparse: Whether the model consumes sparse adjacency input.

  Returns:
    An instantiated GCN or GAT model.

  Raises:
    ValueError: If `model_name` is neither 'gcn' nor 'gat'.
  """
  # Coerce to integers on a fresh list so the caller's list is not mutated
  # (the original converted entries in place).
  hidden_dim = [int(h) for h in hidden_dim]
  # Only GCN and GAT are available.
  if model_name == 'gcn':
    model = GCN(
        num_layers=num_layers,
        hidden_dim=hidden_dim,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
        sparse=sparse,
        bias=True)
  elif model_name == 'gat':
    model = GAT(
        num_layers=num_layers,
        hidden_dim=hidden_dim,
        num_classes=num_classes,
        dropout_rate=dropout_rate,
        num_heads=num_heads,
        sparse=sparse)
  else:
    # Fail loudly instead of letting an undefined `model` raise NameError.
    raise ValueError(
        "Unsupported model name %r; expected 'gcn' or 'gat'." % model_name)
  return model
def cal_acc(labels, logits):
  """Return the fraction of rows whose argmax over `logits` equals `labels`."""
  predictions = tf.math.argmax(logits, axis=1)
  hits = tf.cast(predictions == labels, dtype=tf.float32)
  accuracy = tf.math.reduce_mean(hits)
  return accuracy.numpy().item()
def encode_onehot(labels):
  """Convert Cora string class labels to a one-hot matrix.

  Args:
    labels: Sequence of class-name strings.

  Returns:
    A float np.ndarray of shape (len(labels), 7) with exactly one 1 per row.

  Raises:
    KeyError: If a label is not one of the seven known Cora classes.
  """
  # Mapping from string labels to integer indices.
  label_index = {
      'Case_Based': 0,
      'Genetic_Algorithms': 1,
      'Neural_Networks': 2,
      'Probabilistic_Methods': 3,
      'Reinforcement_Learning': 4,
      'Rule_Learning': 5,
      'Theory': 6,
  }
  num_classes = len(label_index)
  onehot_labels = np.zeros((len(labels), num_classes))
  # enumerate replaces the original hand-maintained row counter.
  for row, label in enumerate(labels):
    onehot_labels[row, label_index[label]] = 1
  return onehot_labels
def normalize_adj_matrix(adj):
  """Symmetrically normalize an adjacency matrix: D^-1/2 * A * D^-1/2.

  Args:
    adj: A scipy sparse adjacency matrix.

  Returns:
    The normalized adjacency matrix in COO format.
  """
  rowsum = np.array(adj.sum(1))
  with np.errstate(divide='ignore'):
    d_inv_sqrt = np.power(rowsum, -0.5).flatten()
  # Zero-degree (isolated) nodes make 0**-0.5 == inf; zero those entries
  # so the normalized matrix stays finite.
  d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
  d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
  return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def normalize_features(features):
  """Row-normalize a feature matrix so each row sums to 1.

  Rows that sum to zero are left as all-zero instead of becoming inf/nan.

  Args:
    features: A scipy sparse feature matrix.

  Returns:
    The row-normalized sparse matrix.
  """
  rowsum = np.array(features.sum(1))
  with np.errstate(divide='ignore'):
    r_inv = np.power(rowsum, -1).flatten()
  # Guard against all-zero rows: 0**-1 == inf would propagate into the
  # product below.
  r_inv[np.isinf(r_inv)] = 0.0
  r_mat_inv = sp.diags(r_inv)
  features = r_mat_inv.dot(features)
  return features
def sparse_matrix_to_tf_sparse_tensor(matrix):
  """Convert a scipy sparse matrix to a `tf.sparse.SparseTensor`."""
  coo = matrix.tocoo().astype(np.float32)
  # (nnz, 2) array of [row, col] index pairs, int64 as required by TF.
  index_pairs = np.vstack((coo.row, coo.col)).T.astype(np.int64)
  indices = tf.convert_to_tensor(index_pairs)
  values = tf.convert_to_tensor(coo.data)
  dense_shape = tf.TensorShape(coo.shape)
  return tf.sparse.SparseTensor(indices, values, dense_shape)
def load_dataset(dataset, sparse_features, normalize_adj):
  """Loads Cora dataset.

  Reads '<dataset>.content' (node id, features, label) and '<dataset>.cites'
  (citation edges) from 'data/<dataset>', builds a symmetric self-looped
  adjacency matrix and normalized features, and returns TF tensors plus
  fixed train/val/test index slices.

  Args:
    dataset: Dataset name, used for both the directory and file prefixes.
    sparse_features: If True, return the adjacency as a tf.sparse.SparseTensor;
      otherwise as a dense tensor.
    normalize_adj: If True, symmetrically normalize the adjacency matrix.

  Returns:
    Tuple (adj, features, labels, idx_train, idx_val, idx_test).
  """
  dir_path = os.path.join('data', dataset)
  content_path = os.path.join(dir_path, '{}.content'.format(dataset))
  citation_path = os.path.join(dir_path, '{}.cites'.format(dataset))
  # Each content row: paper id, binary word features, class label string.
  content = np.genfromtxt(content_path, dtype=np.dtype(str))
  idx = np.array(content[:, 0], dtype=np.int32)
  features = sp.csr_matrix(content[:, 1:-1], dtype=np.float32)
  labels = encode_onehot(content[:, -1])
  # Dict which maps paper id to data id
  idx_map = {j: i for i, j in enumerate(idx)}
  edges_unordered = np.genfromtxt(citation_path, dtype=np.int32)
  # Remap raw paper-id pairs to contiguous node indices.
  edges = np.array(
      list(map(idx_map.get, edges_unordered.flatten())),
      dtype=np.int32).reshape(edges_unordered.shape)
  adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                      shape=(labels.shape[0], labels.shape[0]),
                      dtype=np.float32)
  # build symmetric adjacency matrix: for each (i, j) keep the larger of
  # adj[i, j] and adj[j, i] on both sides, so directed edges become undirected.
  adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
  # Add self-connection edge
  adj = adj + sp.eye(adj.shape[0])
  features = normalize_features(features)
  if normalize_adj:
    adj = normalize_adj_matrix(adj)
  # 5% for train, 300 for validation, 1000 for test
  idx_train = slice(140)
  idx_val = slice(200, 500)
  idx_test = slice(500, 1500)
  features = tf.convert_to_tensor(np.array(features.todense()))
  # np.where(labels)[1] turns the one-hot rows back into integer class ids.
  labels = tf.convert_to_tensor(np.where(labels)[1])
  if sparse_features:
    adj = sparse_matrix_to_tf_sparse_tensor(adj)
  else:
    adj = tf.convert_to_tensor(np.array(adj.todense()))
  return adj, features, labels, idx_train, idx_val, idx_test
| [
"scipy.sparse.diags",
"tensorflow.math.argmax",
"tensorflow.convert_to_tensor",
"models.GCN",
"models.GAT",
"tensorflow.TensorShape",
"numpy.genfromtxt",
"numpy.power",
"numpy.dtype",
"numpy.ones",
"tensorflow.cast",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.where",
"numpy.vstack"... | [((1550, 1580), 'tensorflow.math.argmax', 'tf.math.argmax', (['logits'], {'axis': '(1)'}), '(logits, axis=1)\n', (1564, 1580), True, 'import tensorflow as tf\n'), ((2389, 2409), 'scipy.sparse.diags', 'sp.diags', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (2397, 2409), True, 'import scipy.sparse as sp\n'), ((2649, 2664), 'scipy.sparse.diags', 'sp.diags', (['r_inv'], {}), '(r_inv)\n', (2657, 2664), True, 'import scipy.sparse as sp\n'), ((2995, 3031), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['sp_matrix.data'], {}), '(sp_matrix.data)\n', (3015, 3031), True, 'import tensorflow as tf\n'), ((3042, 3073), 'tensorflow.TensorShape', 'tf.TensorShape', (['sp_matrix.shape'], {}), '(sp_matrix.shape)\n', (3056, 3073), True, 'import tensorflow as tf\n'), ((3083, 3129), 'tensorflow.sparse.SparseTensor', 'tf.sparse.SparseTensor', (['indices', 'values', 'shape'], {}), '(indices, values, shape)\n', (3105, 3129), True, 'import tensorflow as tf\n'), ((3232, 3261), 'os.path.join', 'os.path.join', (['"""data"""', 'dataset'], {}), "('data', dataset)\n", (3244, 3261), False, 'import os\n'), ((3472, 3511), 'numpy.array', 'np.array', (['content[:, 0]'], {'dtype': 'np.int32'}), '(content[:, 0], dtype=np.int32)\n', (3480, 3511), True, 'import numpy as np\n'), ((3525, 3574), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['content[:, 1:-1]'], {'dtype': 'np.float32'}), '(content[:, 1:-1], dtype=np.float32)\n', (3538, 3574), True, 'import scipy.sparse as sp\n'), ((3723, 3767), 'numpy.genfromtxt', 'np.genfromtxt', (['citation_path'], {'dtype': 'np.int32'}), '(citation_path, dtype=np.int32)\n', (3736, 3767), True, 'import numpy as np\n'), ((1087, 1218), 'models.GCN', 'GCN', ([], {'num_layers': 'num_layers', 'hidden_dim': 'hidden_dim', 'num_classes': 'num_classes', 'dropout_rate': 'dropout_rate', 'sparse': 'sparse', 'bias': '(True)'}), '(num_layers=num_layers, hidden_dim=hidden_dim, num_classes=num_classes,\n dropout_rate=dropout_rate, sparse=sparse, bias=True)\n', 
(1090, 1218), False, 'from models import GCN\n'), ((1609, 1653), 'tensorflow.cast', 'tf.cast', (['(indices == labels)'], {'dtype': 'tf.float32'}), '(indices == labels, dtype=tf.float32)\n', (1616, 1653), True, 'import tensorflow as tf\n'), ((4230, 4250), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (4236, 4250), True, 'import scipy.sparse as sp\n'), ((1304, 1445), 'models.GAT', 'GAT', ([], {'num_layers': 'num_layers', 'hidden_dim': 'hidden_dim', 'num_classes': 'num_classes', 'dropout_rate': 'dropout_rate', 'num_heads': 'num_heads', 'sparse': 'sparse'}), '(num_layers=num_layers, hidden_dim=hidden_dim, num_classes=num_classes,\n dropout_rate=dropout_rate, num_heads=num_heads, sparse=sparse)\n', (1307, 1445), False, 'from models import GAT\n'), ((2337, 2359), 'numpy.power', 'np.power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (2345, 2359), True, 'import numpy as np\n'), ((2604, 2624), 'numpy.power', 'np.power', (['rowsum', '(-1)'], {}), '(rowsum, -1)\n', (2612, 2624), True, 'import numpy as np\n'), ((3448, 3461), 'numpy.dtype', 'np.dtype', (['str'], {}), '(str)\n', (3456, 3461), True, 'import numpy as np\n'), ((3921, 3944), 'numpy.ones', 'np.ones', (['edges.shape[0]'], {}), '(edges.shape[0])\n', (3928, 3944), True, 'import numpy as np\n'), ((4583, 4599), 'numpy.where', 'np.where', (['labels'], {}), '(labels)\n', (4591, 4599), True, 'import numpy as np\n'), ((2922, 2963), 'numpy.vstack', 'np.vstack', (['(sp_matrix.row, sp_matrix.col)'], {}), '((sp_matrix.row, sp_matrix.col))\n', (2931, 2963), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Dans une grille en tore (pacman) privilégie les co-linéarités à angles triangulaires.
On fait passer la contrainte par une vague spatiale exogene (prédeterminée, pas émergente)
"""
import sys
# Simulation mode: first command-line argument, defaulting to 'both'.
mode = sys.argv[1] if len(sys.argv) > 1 else 'both'
import elasticite as el
import numpy as np
class EdgeGrid(el.EdgeGrid):
    def champ(self):
        """Compute the force on each lame.

        Combines three contributions: a travelling noise wave, a travelling
        co-linearity wave weighted by pairwise (toroidal) distance, and a
        small velocity damping term.
        """
        def noise_wave(t, x):
            # Periodic pulse (period 6 in t) localized by the narrow .1**2 width.
            return 2. * np.exp((np.cos(2 * np.pi * ((t - 0.) / 6. + x)) - 1.) / .1 ** 2)

        def colin_wave(t, y):
            # Negative pulse, phase-shifted by 3, with a wider .3**2 profile.
            return -8. * np.exp((np.cos(2 * np.pi * ((t - 3.) / 6. + y)) - 1.) / .3 ** 2)

        def distance_weight(d):
            # Short-range coupling: decays with length scale .05.
            return np.exp(-d / .05)

        damping = 0.001
        # Angular deviation from the preferred pi/3 orientation.
        delta_angle = self.angle_relatif() - np.pi / 3.
        coupling = np.sum(
            np.sin(6 * delta_angle) * distance_weight(self.distance(do_torus=True)),
            axis=1)
        force = np.zeros_like(self.lames[2, :])
        force += colin_wave(self.t, self.lames[1, :]) * coupling
        force += noise_wave(self.t, self.lames[0, :]) * np.pi * np.random.randn(self.N_lame)
        force -= damping * self.lames[3, :] / self.dt
        return 100. * force
# Build the simulation for the selected mode and hand it to elasticite's
# main entry point.
e = EdgeGrid(mode=mode)
el.main(e)
| [
"numpy.zeros_like",
"numpy.random.randn",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"elasticite.main"
] | [((1264, 1274), 'elasticite.main', 'el.main', (['e'], {}), '(e)\n', (1271, 1274), True, 'import elasticite as el\n'), ((414, 445), 'numpy.zeros_like', 'np.zeros_like', (['self.lames[2, :]'], {}), '(self.lames[2, :])\n', (427, 445), True, 'import numpy as np\n'), ((714, 731), 'numpy.exp', 'np.exp', (['(-d / 0.05)'], {}), '(-d / 0.05)\n', (720, 731), True, 'import numpy as np\n'), ((1125, 1153), 'numpy.random.randn', 'np.random.randn', (['self.N_lame'], {}), '(self.N_lame)\n', (1140, 1153), True, 'import numpy as np\n'), ((1001, 1024), 'numpy.sin', 'np.sin', (['(6 * delta_angle)'], {}), '(6 * delta_angle)\n', (1007, 1024), True, 'import numpy as np\n'), ((486, 527), 'numpy.cos', 'np.cos', (['(2 * np.pi * ((t - 0.0) / 6.0 + x))'], {}), '(2 * np.pi * ((t - 0.0) / 6.0 + x))\n', (492, 527), True, 'import numpy as np\n'), ((640, 681), 'numpy.cos', 'np.cos', (['(2 * np.pi * ((t - 3.0) / 6.0 + y))'], {}), '(2 * np.pi * ((t - 3.0) / 6.0 + y))\n', (646, 681), True, 'import numpy as np\n')] |
""" @package forcebalance.opt_geo_target Optimized Geometry fitting module.
@author <NAME>, <NAME>
@date 03/2019
"""
from __future__ import division
import os
import shutil
import numpy as np
import re
import subprocess
from collections import OrderedDict, defaultdict
from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key
from forcebalance.target import Target
from forcebalance.molecule import Molecule
from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd
from forcebalance.output import getLogger
logger = getLogger(__name__)
RADIAN_2_DEGREE = 180 / np.pi
def periodic_diff(a, b, v_periodic):
    ''' convenient function for computing the minimum difference in periodic coordinates

    Parameters
    ----------
    a: np.ndarray or float
        Reference values in a numpy array
    b: np.ndarray or float
        Target values in a numpy array
    v_periodic: float > 0
        Value of the periodic boundary

    Returns
    -------
    diff: np.ndarray
        The array of same shape containing the difference between a and b
        All return values are in range [-v_periodic/2, v_periodic/2),
        "( )" means exclusive, "[ ]" means inclusive

    Raises
    ------
    ValueError
        If v_periodic is not positive.

    Examples
    -------
    periodic_diff(0.0, 2.1, 2.0) => -0.1
    periodic_diff(0.0, 1.9, 2.0) => 0.1
    periodic_diff(0.0, 1.0, 2.0) => -1.0
    periodic_diff(1.0, 0.0, 2.0) => -1.0
    periodic_diff(1.0, 0.1, 2.0) => -0.9
    periodic_diff(1.0, 10.1, 2.0) => 0.9
    periodic_diff(1.0, 9.9, 2.0) => -0.9
    '''
    # Raise instead of assert: asserts are stripped under "python -O", and
    # a non-positive period would silently produce wrong results.
    if not v_periodic > 0:
        raise ValueError("v_periodic must be positive, got %s" % (v_periodic,))
    h = 0.5 * v_periodic
    # Shift by half a period, wrap with the modulo, shift back: this maps
    # the raw difference into [-v_periodic/2, v_periodic/2).
    return (a - b + h) % v_periodic - h
def compute_rmsd(ref, tar, v_periodic=None):
    """
    Compute the RMSD between two arrays, supporting periodic difference

    Parameters
    ----------
    ref: np.ndarray
        Reference values
    tar: np.ndarray
        Target values; must have the same length as ref
    v_periodic: float > 0, optional
        If given, differences are wrapped by periodic_diff() into
        [-v_periodic/2, v_periodic/2)

    Returns
    -------
    rmsd: float
        Root-mean-square deviation; 0.0 for empty input

    Raises
    ------
    ValueError
        If ref and tar have different lengths.
    """
    # Raise instead of assert so the length check survives "python -O".
    if len(ref) != len(tar):
        raise ValueError('array length must match')
    n = len(ref)
    if n == 0: return 0.0
    if v_periodic is not None:
        diff = periodic_diff(ref, tar, v_periodic)
    else:
        diff = ref - tar
    rmsd = np.sqrt(np.sum(diff**2) / n)
    return rmsd
class OptGeoTarget(Target):
    """ Subclass of Target for fitting MM optimized geometries to QM optimized geometries. """
    def __init__(self,options,tgt_opts,forcefield):
        """Read per-system options, create MD engines, and precompute the QM
        reference internal coordinates for every system in the target."""
        super(OptGeoTarget,self).__init__(options,tgt_opts,forcefield)
        self.set_option(None, None, 'optgeo_options', os.path.join(self.tgtdir,tgt_opts['optgeo_options_txt']))
        self.sys_opts = self.parse_optgeo_options(self.optgeo_options)
        ## Build keyword dictionaries to pass to engine.
        engine_args = OrderedDict(list(self.OptionDict.items()) + list(options.items()))
        engine_args.pop('name', None)
        ## Create engine objects
        self.create_engines(engine_args)
        ## Create internal coordinates
        self._build_internal_coordinates()
        ## Option for how much data to write to disk.
        self.set_option(tgt_opts,'writelevel','writelevel')
    def create_engines(self, engine_args):
        """Populate ``self.engines`` (mapping of system name -> engine object);
        must be implemented by engine-specific subclasses."""
        raise NotImplementedError("create_engines() should be implemented in subclass")
    @staticmethod
    def parse_optgeo_options(filename):
        """ Parse an optgeo_options.txt file into specific OptGeoTarget Target Options"""
        logger.info("Reading optgeo options from file: %s\n" % filename)
        global_opts = OrderedDict()
        sys_opts = OrderedDict()
        section = None
        section_opts = OrderedDict()
        with open(filename) as f:
            for ln, line in enumerate(f, 1):
                # Anything after "#" is a comment
                line = line.split("#", maxsplit=1)[0].strip()
                if not line: continue
                ls = line.split()
                key = ls[0].lower()
                if key[0] == "$":
                    # section sign $
                    if key == '$end':
                        if section is None:
                            warn_press_key("Line %i: Encountered $end before any section." % ln)
                        elif section == 'global':
                            # global options read finish
                            global_opts = section_opts
                        elif section == 'system':
                            # check if system section contains name
                            if 'name' not in section_opts:
                                warn_press_key("Line %i: You need to specify a name for the system section ending." % ln)
                            elif section_opts['name'] in sys_opts:
                                warn_press_key("Line %i: A system named %s already exists in Systems" % (ln, section_opts['name']))
                            else:
                                sys_opts[section_opts['name']] = section_opts
                        # reset parser state for the next section
                        section = None
                        section_opts = OrderedDict()
                    else:
                        if section is not None:
                            warn_press_key("Line %i: Encountered section start %s before previous section $end." % (ln, key))
                        if key == '$global':
                            section = 'global'
                        elif key == '$system':
                            section = 'system'
                        else:
                            warn_press_key("Line %i: Encountered unsupported section name %s " % (ln, key))
                else:
                    # put normal key-value options into section_opts
                    if key in ['name', 'geometry', 'topology']:
                        if len(ls) != 2:
                            warn_press_key("Line %i: one value expected for key %s" % (ln, key))
                        if section == 'global':
                            warn_press_key("Line %i: key %s should not appear in $global section" % (ln, key))
                        section_opts[key] = ls[1]
                    elif key in ['bond_denom', 'angle_denom', 'dihedral_denom', 'improper_denom']:
                        if len(ls) != 2:
                            warn_press_key("Line %i: one value expected for key %s" % (ln, key))
                        section_opts[key] = float(ls[1])
                    elif key == 'mol2':
                        # special parsing for mol2 option for SMIRNOFF engine
                        # the value is a list of filenames
                        section_opts[key] = ls[1:]
        # apply a few default global options
        global_opts.setdefault('bond_denom', 0.02)
        global_opts.setdefault('angle_denom', 3)
        global_opts.setdefault('dihedral_denom', 10.0)
        global_opts.setdefault('improper_denom', 10.0)
        # copy global options into each system
        for sys_name, sys_opt_dict in sys_opts.items():
            for k,v in global_opts.items():
                # do not overwrite system options
                sys_opt_dict.setdefault(k, v)
            for k in ['name', 'geometry', 'topology']:
                if k not in sys_opt_dict:
                    warn_press_key("key %s missing in system section named %s" %(k, sys_name))
        return sys_opts
    def _build_internal_coordinates(self):
        "Build internal coordinates system with geometric.internal.PrimitiveInternalCoordinates"
        # geometric module is imported to build internal coordinates
        # importing here will avoid import error for calculations not using this target
        from geometric.internal import PrimitiveInternalCoordinates, Distance, Angle, Dihedral, OutOfPlane
        self.internal_coordinates = OrderedDict()
        for sysname, sysopt in self.sys_opts.items():
            geofile = os.path.join(self.root, self.tgtdir, sysopt['geometry'])
            topfile = os.path.join(self.root, self.tgtdir, sysopt['topology'])
            # logger.info("Building internal coordinates from file: %s\n" % topfile)
            # m0 carries the QM reference geometry; m carries the topology used
            # to enumerate primitive internal coordinates.
            m0 = Molecule(geofile)
            m = Molecule(topfile)
            p_IC = PrimitiveInternalCoordinates(m)
            # here we explicitly pick the bonds, angles and dihedrals to evaluate
            ic_bonds, ic_angles, ic_dihedrals, ic_impropers = [], [], [], []
            for ic in p_IC.Internals:
                if isinstance(ic, Distance):
                    ic_bonds.append(ic)
                elif isinstance(ic, Angle):
                    ic_angles.append(ic)
                elif isinstance(ic, Dihedral):
                    ic_dihedrals.append(ic)
                elif isinstance(ic, OutOfPlane):
                    ic_impropers.append(ic)
            # compute and store reference values
            # (angle-like values converted to degrees via RADIAN_2_DEGREE)
            pos_ref = m0.xyzs[0]
            vref_bonds = np.array([ic.value(pos_ref) for ic in ic_bonds])
            vref_angles = np.array([ic.value(pos_ref)*RADIAN_2_DEGREE for ic in ic_angles])
            vref_dihedrals = np.array([ic.value(pos_ref)*RADIAN_2_DEGREE for ic in ic_dihedrals])
            vref_impropers = np.array([ic.value(pos_ref)*RADIAN_2_DEGREE for ic in ic_impropers])
            self.internal_coordinates[sysname] = {
                'ic_bonds': ic_bonds,
                'ic_angles': ic_angles,
                'ic_dihedrals': ic_dihedrals,
                'ic_impropers': ic_impropers,
                'vref_bonds': vref_bonds,
                'vref_angles': vref_angles,
                'vref_dihedrals': vref_dihedrals,
                'vref_impropers': vref_impropers,
            }
    def system_driver(self, sysname):
        """ Run calculation for one system, return internal coordinate values after optimization """
        engine = self.engines[sysname]
        ic_dict = self.internal_coordinates[sysname]
        if engine.__class__.__name__ in ('OpenMM', 'SMIRNOFF'):
            # OpenMM.optimize() by default resets geometry to initial geometry before optimization
            engine.optimize()
            pos = engine.getContextPosition()
        else:
            # NOTE(review): `engine` is an instance, so `engine.__name__` would
            # itself raise AttributeError here; `engine.__class__.__name__` is
            # presumably intended -- confirm before relying on this message.
            raise NotImplementedError("system_driver() not implemented for %s" % engine.__name__)
        # angle-like values converted from radians to degrees
        v_ic = {
            'bonds': np.array([ic.value(pos) for ic in ic_dict['ic_bonds']]),
            'angles': np.array([ic.value(pos)*RADIAN_2_DEGREE for ic in ic_dict['ic_angles']]),
            'dihedrals': np.array([ic.value(pos)*RADIAN_2_DEGREE for ic in ic_dict['ic_dihedrals']]),
            'impropers': np.array([ic.value(pos)*RADIAN_2_DEGREE for ic in ic_dict['ic_impropers']]),
        }
        return v_ic
    def indicate(self):
        """Print the per-system table of RMSDs/denominators (built during the
        last objective evaluation in self.PrintDict) under a title line
        showing the total objective."""
        title_str = "%s, Objective = % .5e" % (self.name, self.objective)
        #QYD: This title is carefully placed to align correctly
        column_head_str1 = " %-20s %13s %13s %15s %15s %17s " % ("System", "Bonds", "Angles", "Dihedrals", "Impropers", "Term.")
        column_head_str2 = " %-20s %9s %7s %9s %7s %9s %7s %9s %7s %17s " % ('', 'RMSD', 'denom', 'RMSD', 'denom', 'RMSD', 'denom', 'RMSD', 'denom', '')
        printcool_dictionary(self.PrintDict,title=title_str + '\n' + column_head_str1 + '\n' + column_head_str2, center=[True,False,False])
    def get(self, mvals, AGrad=False, AHess=False):
        """Evaluate the objective function.

        Parameters
        ----------
        mvals : force-field mathematical parameter values
        AGrad, AHess : bool
            Whether to compute the analytic-by-finite-difference gradient /
            Gauss-Newton Hessian of the objective.

        Returns
        -------
        dict with keys 'X' (scalar objective), 'G' (gradient, shape (np,)),
        'H' (Hessian, shape (np, np)).
        """
        Answer = {'X':0.0, 'G':np.zeros(self.FF.np), 'H':np.zeros((self.FF.np, self.FF.np))}
        self.PrintDict = OrderedDict()
        # enable self.system_mval_masks (supported by OptGeoTarget_SMIRNOFF)
        enable_system_mval_mask = hasattr(self, 'system_mval_masks')
        def compute(mvals, p_idx=None):
            ''' Compute total objective value for each system '''
            self.FF.make(mvals)
            v_obj_list = []
            for sysname, sysopt in self.sys_opts.items():
                # ref values of each type
                vref_bonds = self.internal_coordinates[sysname]['vref_bonds']
                vref_angles = self.internal_coordinates[sysname]['vref_angles']
                vref_dihedrals = self.internal_coordinates[sysname]['vref_dihedrals']
                vref_impropers = self.internal_coordinates[sysname]['vref_impropers']
                # counts of each type
                n_bonds = len(vref_bonds)
                n_angles = len(vref_angles)
                n_dihedrals = len(vref_dihedrals)
                n_impropers = len(vref_impropers)
                # use self.system_mval_masks to skip evaluations when computing gradients
                if enable_system_mval_mask and in_fd() and (p_idx is not None) and (self.system_mval_masks[sysname][p_idx] == False):
                    # masked parameter: contribute zeros so the residual vector
                    # keeps a fixed length across all compute() calls
                    v_obj_list += [0] * (n_bonds + n_angles + n_dihedrals + n_impropers)
                    continue
                # read denominators from system options
                bond_denom = sysopt['bond_denom']
                angle_denom = sysopt['angle_denom']
                dihedral_denom = sysopt['dihedral_denom']
                improper_denom = sysopt['improper_denom']
                # inverse denominators become scaling factors; 0 for denom 0
                scale_bond = 1.0 / bond_denom if bond_denom != 0 else 0.0
                scale_angle = 1.0 / angle_denom if angle_denom != 0 else 0.0
                scale_dihedral = 1.0 / dihedral_denom if dihedral_denom != 0 else 0.0
                scale_improper = 1.0 / improper_denom if improper_denom != 0 else 0.0
                # calculate new internal coordinates
                v_ic = self.system_driver(sysname)
                # objective contribution from bonds
                vtar_bonds = v_ic['bonds']
                diff_bond = ((vref_bonds - vtar_bonds) * scale_bond).tolist() if n_bonds > 0 else []
                # objective contribution from angles
                vtar_angles = v_ic['angles']
                diff_angle = (periodic_diff(vref_angles, vtar_angles, 360) * scale_angle).tolist() if n_angles > 0 else []
                # objective contribution from dihedrals
                vtar_dihedrals = v_ic['dihedrals']
                diff_dihedral = (periodic_diff(vref_dihedrals, vtar_dihedrals, 360) * scale_dihedral).tolist() if n_dihedrals > 0 else []
                # objective contribution from improper dihedrals
                vtar_impropers = v_ic['impropers']
                diff_improper = (periodic_diff(vref_impropers, vtar_impropers, 360) * scale_improper).tolist() if n_impropers > 0 else []
                # combine objective values into a big result list
                sys_obj_list = diff_bond + diff_angle + diff_dihedral + diff_improper
                # extend the result v_obj_list by individual terms in this system
                v_obj_list += sys_obj_list
                # save print string
                if not in_fd():
                    # For printing, we group the RMSD by type
                    rmsd_bond = compute_rmsd(vref_bonds, vtar_bonds)
                    rmsd_angle = compute_rmsd(vref_angles, vtar_angles, v_periodic=360)
                    rmsd_dihedral = compute_rmsd(vref_dihedrals, vtar_dihedrals, v_periodic=360)
                    rmsd_improper = compute_rmsd(vref_impropers, vtar_impropers, v_periodic=360)
                    obj_total = sum(v**2 for v in sys_obj_list)
                    self.PrintDict[sysname] = "% 9.3f % 7.2f % 9.3f % 7.2f % 9.3f % 7.2f % 9.3f % 7.2f %17.3f" % (rmsd_bond, \
                        bond_denom, rmsd_angle, angle_denom, rmsd_dihedral, dihedral_denom, rmsd_improper, improper_denom, obj_total)
            return np.array(v_obj_list, dtype=float)
        V = compute(mvals)
        # objective is the squared norm of the weighted residual vector
        Answer['X'] = np.dot(V,V)
        # write objective decomposition if wanted
        if self.writelevel > 0:
            # recover mvals
            self.FF.make(mvals)
            with open('rmsd_decomposition.txt', 'w') as fout:
                for sysname in self.internal_coordinates:
                    fout.write("\n[ %s ]\n" % sysname)
                    fout.write('%-25s %15s %15s %15s\n' % ("Internal Coordinate", "Ref QM Value", "Cur MM Value", "Difference"))
                    # reference data
                    sys_data = self.internal_coordinates[sysname]
                    # NOTE(review): the next line is a no-op expression
                    # (its result is discarded) -- likely leftover debug code.
                    sys_data['ic_bonds']
                    # compute all internal coordinate values again
                    v_ic = self.system_driver(sysname)
                    for p in ['bonds', 'angles', 'dihedrals', 'impropers']:
                        fout.write('--- ' + p + ' ---\n')
                        ic_list = sys_data['ic_' + p]
                        ref_v = sys_data['vref_' + p]
                        tar_v = v_ic[p]
                        # print each value
                        for ic, v1, v2 in zip(ic_list, ref_v, tar_v):
                            diff = periodic_diff(v1, v2, v_periodic=360) if p != 'bonds' else v1-v2
                            fout.write('%-25s %15.5f %15.5f %+15.3e\n' % (ic, v1, v2, diff))
        # compute gradients and hessian by finite difference over parameters
        dV = np.zeros((self.FF.np,len(V)))
        if AGrad or AHess:
            for p in self.pgrad:
                dV[p,:], _ = f12d3p(fdwrap(compute, mvals, p, p_idx = p), h = self.h, f0 = V)
            for p in self.pgrad:
                Answer['G'][p] = 2*np.dot(V, dV[p,:])
                for q in self.pgrad:
                    Answer['H'][p,q] = 2*np.dot(dV[p,:], dV[q,:])
        if not in_fd():
            self.objective = Answer['X']
        self.FF.make(mvals)
        return Answer
| [
"forcebalance.nifty.printcool_dictionary",
"numpy.sum",
"numpy.zeros",
"forcebalance.output.getLogger",
"forcebalance.molecule.Molecule",
"numpy.array",
"forcebalance.finite_difference.in_fd",
"forcebalance.finite_difference.fdwrap",
"collections.OrderedDict",
"numpy.dot",
"forcebalance.nifty.wa... | [((619, 638), 'forcebalance.output.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (628, 638), False, 'from forcebalance.output import getLogger\n'), ((3351, 3364), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3362, 3364), False, 'from collections import OrderedDict, defaultdict\n'), ((3384, 3397), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3395, 3397), False, 'from collections import OrderedDict, defaultdict\n'), ((3444, 3457), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3455, 3457), False, 'from collections import OrderedDict, defaultdict\n'), ((7574, 7587), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7585, 7587), False, 'from collections import OrderedDict, defaultdict\n'), ((10896, 11034), 'forcebalance.nifty.printcool_dictionary', 'printcool_dictionary', (['self.PrintDict'], {'title': "(title_str + '\\n' + column_head_str1 + '\\n' + column_head_str2)", 'center': '[True, False, False]'}), "(self.PrintDict, title=title_str + '\\n' +\n column_head_str1 + '\\n' + column_head_str2, center=[True, False, False])\n", (10916, 11034), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key\n'), ((11199, 11212), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11210, 11212), False, 'from collections import OrderedDict, defaultdict\n'), ((15393, 15405), 'numpy.dot', 'np.dot', (['V', 'V'], {}), '(V, V)\n', (15399, 15405), True, 'import numpy as np\n'), ((2054, 2071), 'numpy.sum', 'np.sum', (['(diff ** 2)'], {}), '(diff ** 2)\n', (2060, 2071), True, 'import numpy as np\n'), ((2392, 2449), 'os.path.join', 'os.path.join', (['self.tgtdir', "tgt_opts['optgeo_options_txt']"], {}), "(self.tgtdir, tgt_opts['optgeo_options_txt'])\n", (2404, 2449), False, 'import os\n'), ((7664, 7720), 'os.path.join', 'os.path.join', (['self.root', 'self.tgtdir', "sysopt['geometry']"], 
{}), "(self.root, self.tgtdir, sysopt['geometry'])\n", (7676, 7720), False, 'import os\n'), ((7743, 7799), 'os.path.join', 'os.path.join', (['self.root', 'self.tgtdir', "sysopt['topology']"], {}), "(self.root, self.tgtdir, sysopt['topology'])\n", (7755, 7799), False, 'import os\n'), ((7902, 7919), 'forcebalance.molecule.Molecule', 'Molecule', (['geofile'], {}), '(geofile)\n', (7910, 7919), False, 'from forcebalance.molecule import Molecule\n'), ((7936, 7953), 'forcebalance.molecule.Molecule', 'Molecule', (['topfile'], {}), '(topfile)\n', (7944, 7953), False, 'from forcebalance.molecule import Molecule\n'), ((7973, 8004), 'geometric.internal.PrimitiveInternalCoordinates', 'PrimitiveInternalCoordinates', (['m'], {}), '(m)\n', (8001, 8004), False, 'from geometric.internal import PrimitiveInternalCoordinates, Distance, Angle, Dihedral, OutOfPlane\n'), ((11112, 11132), 'numpy.zeros', 'np.zeros', (['self.FF.np'], {}), '(self.FF.np)\n', (11120, 11132), True, 'import numpy as np\n'), ((11138, 11172), 'numpy.zeros', 'np.zeros', (['(self.FF.np, self.FF.np)'], {}), '((self.FF.np, self.FF.np))\n', (11146, 11172), True, 'import numpy as np\n'), ((15309, 15342), 'numpy.array', 'np.array', (['v_obj_list'], {'dtype': 'float'}), '(v_obj_list, dtype=float)\n', (15317, 15342), True, 'import numpy as np\n'), ((17133, 17140), 'forcebalance.finite_difference.in_fd', 'in_fd', ([], {}), '()\n', (17138, 17140), False, 'from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd\n'), ((17004, 17023), 'numpy.dot', 'np.dot', (['V', 'dV[p, :]'], {}), '(V, dV[p, :])\n', (17010, 17023), True, 'import numpy as np\n'), ((7034, 7109), 'forcebalance.nifty.warn_press_key', 'warn_press_key', (["('key %s missing in system section named %s' % (k, sys_name))"], {}), "('key %s missing in system section named %s' % (k, sys_name))\n", (7048, 7109), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, 
warn_press_key\n'), ((12316, 12323), 'forcebalance.finite_difference.in_fd', 'in_fd', ([], {}), '()\n', (12321, 12323), False, 'from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd\n'), ((14543, 14550), 'forcebalance.finite_difference.in_fd', 'in_fd', ([], {}), '()\n', (14548, 14550), False, 'from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd\n'), ((16885, 16919), 'forcebalance.finite_difference.fdwrap', 'fdwrap', (['compute', 'mvals', 'p'], {'p_idx': 'p'}), '(compute, mvals, p, p_idx=p)\n', (16891, 16919), False, 'from forcebalance.finite_difference import fdwrap, f1d2p, f12d3p, in_fd\n'), ((17093, 17119), 'numpy.dot', 'np.dot', (['dV[p, :]', 'dV[q, :]'], {}), '(dV[p, :], dV[q, :])\n', (17099, 17119), True, 'import numpy as np\n'), ((4857, 4870), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4868, 4870), False, 'from collections import OrderedDict, defaultdict\n'), ((3938, 4006), 'forcebalance.nifty.warn_press_key', 'warn_press_key', (["('Line %i: Encountered $end before any section.' % ln)"], {}), "('Line %i: Encountered $end before any section.' % ln)\n", (3952, 4006), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key\n'), ((4973, 5079), 'forcebalance.nifty.warn_press_key', 'warn_press_key', (["('Line %i: Encountered section start %s before previous section $end.' % (\n ln, key))"], {}), "(\n 'Line %i: Encountered section start %s before previous section $end.' 
%\n (ln, key))\n", (4987, 5079), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key\n'), ((5619, 5687), 'forcebalance.nifty.warn_press_key', 'warn_press_key', (["('Line %i: one value expected for key %s' % (ln, key))"], {}), "('Line %i: one value expected for key %s' % (ln, key))\n", (5633, 5687), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key\n'), ((5764, 5850), 'forcebalance.nifty.warn_press_key', 'warn_press_key', (["('Line %i: key %s should not appear in $global section' % (ln, key))"], {}), "('Line %i: key %s should not appear in $global section' % (ln,\n key))\n", (5778, 5850), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key\n'), ((5315, 5394), 'forcebalance.nifty.warn_press_key', 'warn_press_key', (["('Line %i: Encountered unsupported section name %s ' % (ln, key))"], {}), "('Line %i: Encountered unsupported section name %s ' % (ln, key))\n", (5329, 5394), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key\n'), ((6065, 6133), 'forcebalance.nifty.warn_press_key', 'warn_press_key', (["('Line %i: one value expected for key %s' % (ln, key))"], {}), "('Line %i: one value expected for key %s' % (ln, key))\n", (6079, 6133), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key\n'), ((4378, 4472), 'forcebalance.nifty.warn_press_key', 'warn_press_key', (["('Line %i: You need to specify a name for the system section ending.' % ln)"], {}), "(\n 'Line %i: You need to specify a name for the system section ending.' 
% ln)\n", (4392, 4472), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key\n'), ((4567, 4670), 'forcebalance.nifty.warn_press_key', 'warn_press_key', (["('Line %i: A system named %s already exists in Systems' % (ln, section_opts\n ['name']))"], {}), "('Line %i: A system named %s already exists in Systems' % (ln,\n section_opts['name']))\n", (4581, 4670), False, 'from forcebalance.nifty import col, eqcgmx, flat, floatornan, fqcgmx, invert_svd, kb, printcool, printcool_dictionary, bohr2ang, warn_press_key\n')] |
#%%
# Setup cell: move the working directory two levels up (so relative data
# paths below resolve from the project root) and make the maggot_models
# repository importable.
import sys
import os
#sys.path.append(os.getcwd() + '/connectome_tools/')
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
os.chdir(os.path.dirname(os.getcwd())) # make directory one step up the current directory
sys.path.append('/Users/mwinding/repos/maggot_models')
from pymaid_creds import url, name, password, token
import pymaid
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
from src.traverse import Cascade, to_transmission_matrix
from src.traverse import TraverseDispatcher
from src.visualization import matrixplot
# open a connection to the CATMAID server (credentials from pymaid_creds)
rm = pymaid.CatmaidInstance(url, token, name, password)
import connectome_tools.cascade_analysis as casc
import connectome_tools.celltype as ct
#mg = load_metagraph("Gad", version="2020-06-10", path = '/Volumes/GoogleDrive/My Drive/python_code/maggot_models/data/processed/')
#mg.calculate_degrees(inplace=True)
#adj = mg.adj # adjacency matrix from the "mg" object
# axon-dendrite adjacency matrix loaded from CSV; column labels cast back to int
# NOTE(review): the f-string below contains no placeholders -- a plain string would do.
adj_ad = pd.read_csv(f'data/adj/all-neurons_ad.csv', index_col = 0).rename(columns=int)
adj = adj_ad.values
# per-neuron cluster assignments (presumably indexed by skid -- verify against file)
clusters = pd.read_csv('cascades/data/meta-method=color_iso-d=8-bic_ratio=0.95-min_split=32.csv', index_col = 0, header = 0)
# signal-flow ordering of the level-7 clusters, loaded as an (n, 1) array
order = pd.read_csv('cascades/data/signal_flow_order_lvl7.csv').values
# make array from list of lists: flatten the (n, 1) array produced by
# .values into a 1-D array of cluster labels (comprehension replaces the
# original append loop; same result).
# NOTE(review): this `order` is overwritten by a hard-coded modality list a
# few lines below, so the flattened cluster ordering is currently unused.
order = np.array([sublist[0] for sublist in order])
#%%
# pull sensory annotations and then pull associated skids
# hard-coded processing/display order of the sensory modalities
order = ['ORN', 'AN sensories', 'MN sensories', 'photoreceptors', 'thermosensories', 'v\'td', 'A1 ascending noci', 'A1 ascending mechano', 'A1 ascending proprio', 'A1 ascending class II_III']
# one Celltype per modality, holding the skids annotated 'mw <modality>'
# (the comprehension variable `name` shadows the imported credential `name`
# only inside the comprehension scope)
sens = [ct.Celltype(name, pymaid.get_skids_by_annotation(f'mw {name}')) for name in order]
input_skids_list = [x.get_skids() for x in sens]
input_skids = [val for sublist in input_skids_list for val in sublist]
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
#%%
# cascades from each sensory modality
# cascade parameters, passed to run_cascades_parallel below
p = 0.05
max_hops = 10
n_init = 100
simultaneous = True
adj=adj_ad
# run one cascade per sensory modality, starting at its input skids and
# stopping at the brain output skids
input_hit_hist_list = casc.Cascade_Analyzer.run_cascades_parallel(source_skids_list=input_skids_list, stop_skids=output_skids,
                                                            adj=adj_ad, p=p, max_hops=max_hops, n_init=n_init, simultaneous=simultaneous)
# **** continue here when new clusters are available
#%%
# grouping cascade indices by cluster type
# level 7 clusters
lvl7 = clusters.groupby('lvl7_labels')
# cluster order and number of neurons per cluster
cluster_lvl7 = []
for key in lvl7.groups.keys():
    cluster_lvl7.append([key, len(lvl7.groups[key])])
cluster_lvl7 = pd.DataFrame(cluster_lvl7, columns = ['key', 'num_cluster'])
# breaking signal cascades into cluster groups
input_hit_hist_lvl7 = []
for hit_hist in input_hit_hist_list:
    sensory_clustered_hist = []
    for key in lvl7.groups.keys():
        skids = lvl7.groups[key]
        # NOTE(review): `mg` only exists in the commented-out load_metagraph
        # block above, so this line raises NameError as written; the row
        # index of adj_ad is presumably intended -- confirm.
        indices = np.where([x in skids for x in mg.meta.index])[0]
        cluster_hist = hit_hist[indices]
        cluster_hist = pd.DataFrame(cluster_hist, index = indices)
        sensory_clustered_hist.append(cluster_hist)
    input_hit_hist_lvl7.append(sensory_clustered_hist)
# summed signal cascades per cluster group (hops remain intact)
summed_hist_lvl7 = []
for input_hit_hist in input_hit_hist_lvl7:
    sensory_sum_hist = []
    for i, cluster in enumerate(input_hit_hist):
        sum_cluster = cluster.sum(axis = 0)/(len(cluster.index)) # normalize by number of neurons in cluster
        sensory_sum_hist.append(sum_cluster)
    sensory_sum_hist = pd.DataFrame(sensory_sum_hist) # column names will be hop number
    sensory_sum_hist.index = cluster_lvl7.key # uses cluster name for index of each summed cluster row
    summed_hist_lvl7.append(sensory_sum_hist)
# number of neurons per cluster group over threshold (hops remain intact)
threshold = 50
num_hist_lvl7 = []
for input_hit_hist in input_hit_hist_lvl7:
    sensory_num_hist = []
    for i, cluster in enumerate(input_hit_hist):
        # count neurons in the cluster whose visit count exceeds threshold, per hop
        num_cluster = (cluster>threshold).sum(axis = 0)
        sensory_num_hist.append(num_cluster)
    sensory_num_hist = pd.DataFrame(sensory_num_hist) # column names will be hop number
    sensory_num_hist.index = cluster_lvl7.key # uses cluster name for index of each summed cluster row
    num_hist_lvl7.append(sensory_num_hist)
# %%
# plot signal of all sensories through clusters
# main figure
# NOTE(review): at this point `order` holds the hard-coded sensory-modality
# list, not the cluster ordering it overwrote, while these DataFrames are
# indexed by cluster label -- confirm the intended row order for .loc[order].
fig, axs = plt.subplots(
    1, 1, figsize=(5, 5)
)
vmax = 300
ax = axs
# sum(...) adds the per-modality DataFrames elementwise: total visits per
# cluster per hop, shown for hops 0-7
sns.heatmap(sum(summed_hist_lvl7).loc[order, 0:7], ax = ax, vmax = vmax, rasterized=True, cbar_kws={'label': 'Visits from sensory signal'})
ax.set_ylabel('Individual Clusters')
ax.set_yticks([])
ax.set_xlabel('Hops from sensory neuron signal')
plt.savefig('cascades/cluster_plots/all_sensory_through_clusters_lvl7.pdf', format='pdf', bbox_inches='tight')
# plotting number of neurons downstream of each sensory modality (with threshold)
fig, axs = plt.subplots(
    1, 1, figsize=(5, 5)
)
ax = axs
sns.heatmap(sum(num_hist_lvl7).loc[order, 0:7], ax = ax, rasterized=True, cbar_kws={'label': 'Number of Neurons Downstream'})
ax.set_ylabel('Individual Clusters')
ax.set_yticks([])
ax.set_xlabel('Hops from sensory neuron signal')
#%%
#
# checking where inputs, outputs, etc are located in these reordered clusters
# maybe supplemental figure
# NOTE(review): `order` here is the sensory-modality list assigned earlier,
# yet it is compared against lvl7_labels cluster names below -- confirm this
# is the intended iteration set.
cluster_membership = []
for cluster_key in order:
    # all neurons belonging to this cluster
    cluster_temp = clusters[clusters.lvl7_labels==cluster_key]
    # counts of each annotated cell type within the cluster
    cluster_dSEZ = sum(cluster_temp.dSEZ == True)
    cluster_dVNC = sum(cluster_temp.dVNC == True)
    cluster_RG = sum(cluster_temp.RG == True)
    cluster_ORN = sum(cluster_temp.sens_subclass_ORN == True)
    cluster_AN = sum(cluster_temp.sens_subclass_AN == True)
    cluster_MN = sum(cluster_temp.sens_subclass_MN == True)
    cluster_thermo = sum(cluster_temp.sens_subclass_thermo == True)
    cluster_photo = sum((cluster_temp.sens_subclass_photoRh5 == True) | (cluster_temp.sens_subclass_photoRh6 == True))
    cluster_A00c = sum(cluster_temp.A00c == True)
    cluster_vtd = sum(cluster_temp.sens_subclass_vtd == True)
    cluster_input = sum(cluster_temp.input == True)
    cluster_output = sum(cluster_temp.output == True)
    cluster_brain = sum(cluster_temp.brain_neurons == True)
    cluster_all = len(cluster_temp.index)
    # store fractions of the cluster belonging to each cell type
    cluster_membership.append(dict({'cluster_key': cluster_key,
                                    'total_neurons': cluster_all, 'brain_neurons': cluster_brain/cluster_all,
                                    'outputs': cluster_output/cluster_all, 'inputs': cluster_input/cluster_all,
                                    'dVNC': cluster_dVNC/cluster_all, 'dSEZ': cluster_dSEZ/cluster_all,
                                    'RG': cluster_RG/cluster_all,
                                    'ORN': cluster_ORN/cluster_all, 'AN': cluster_AN/cluster_all,
                                    'MN': cluster_MN/cluster_all, 'thermo': cluster_thermo/cluster_all,
                                    'photo': cluster_photo/cluster_all, 'noci': cluster_A00c/cluster_all,
                                    'vtd': cluster_vtd/cluster_all}))
cluster_membership = pd.DataFrame(cluster_membership)
fig, ax = plt.subplots(
    1, 1, figsize=(5, 5)
)
sns.heatmap(cluster_membership.iloc[:, 3:len(cluster_membership)], rasterized = True, cbar_kws={'label': 'Fraction of Cluster'}, ax = ax)
ax.set_ylabel('Individual Clusters')
ax.set_yticks([])
ax.set_xlabel('Cell Type Membership')
fig.savefig('cascades/cluster_plots/location_inputs_outputs_clusters.pdf', format='pdf', bbox_inches='tight')
# simplified version showing only the first few membership columns
fig, ax = plt.subplots(
    1, 1, figsize=(5, 5)
)
sns.heatmap(cluster_membership.iloc[:, 3:8], rasterized = True, cbar_kws={'label': 'Fraction of Cluster'}, ax = ax)
ax.set_ylabel('Individual Clusters')
ax.set_yticks([])
ax.set_xlabel('Cell Type Membership')
fig.savefig('cascades/cluster_plots/location_inputs_outputs_clusters_simplified.pdf', format='pdf', bbox_inches='tight')
# %%
# plot signal of each sensory modality through clusters
# probably supplemental figure
# NOTE(review): `input_names_format_reordered` is never defined in this
# script -- these cells raise NameError unless it was defined in an
# interactive session; confirm where it should come from.
fig, axs = plt.subplots(
    4, 2, figsize=(10, 10)
)
fig.tight_layout(pad=2.0)
vmax = n_init*.8
for i in range(0, len(input_names_format_reordered)):
    # first four modalities in the left column, the rest in the right column
    if(i<4):
        ax = axs[i, 0]
    if(i>=4):
        ax = axs[i-4,1]
    sns.heatmap(summed_hist_lvl7[i].loc[order], ax = ax, rasterized=True, vmax = vmax, cbar_kws={'label': 'Average Number of Visits'})
    ax.set_ylabel('Individual Clusters')
    ax.set_yticks([])
    ax.set_xlabel('Hops from %s signal' %input_names_format_reordered[i])
#sns.heatmap(summed_hist_lvl7[1].loc[sort], ax = ax, rasterized=True)
# use the unused bottom-right axis to hold the figure caption
ax = axs[3, 1]
ax.axis("off")
caption = f"Figure: Hop histogram of individual level 7 clusters\nCascades starting at each sensory modality\nending at brain output neurons"
ax.text(0, 1, caption, va="top")
plt.savefig('cascades/cluster_plots/sensory_through_clusters_lvl7.pdf', format='pdf', bbox_inches='tight')
#%%
# plot number of neurons downstream each sensory modality with threshold
fig, axs = plt.subplots(
    4, 2, figsize=(10, 10)
)
fig.tight_layout(pad=2.0)
vmax = 20
for i in range(0, len(input_names_format_reordered)):
    if(i<4):
        ax = axs[i, 0]
    if(i>=4):
        ax = axs[i-4,1]
    sns.heatmap(num_hist_lvl7[i].loc[order], ax = ax, rasterized=True, vmax = vmax, cbar_kws={'label': 'Number of Neurons'})
    ax.set_ylabel('Individual Clusters')
    ax.set_yticks([])
    ax.set_xlabel('Hops from %s signal' %input_names_format_reordered[i])
#sns.heatmap(summed_hist_lvl7[1].loc[sort], ax = ax, rasterized=True)
ax = axs[3, 1]
ax.axis("off")
caption = f"Figure: Hop histogram of individual level 7 clusters\nCascades starting at each sensory modality\nending at brain output neurons"
ax.text(0, 1, caption, va="top")
plt.savefig('cascades/cluster_plots/sensory_through_clusters_lvl7_num_neurons.pdf', format='pdf', bbox_inches='tight')
# %%
# %%
# multisensory nature of each cluster;
# not clear if this is a figure or not
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['font.size'] = 6
# collapse the hop dimension: total visits per cluster for each modality
collapsed_hops_lvl7_list = []
for hist in summed_hist_lvl7:
    collapsed_hops_lvl7_list.append(hist.sum(axis = 1))
# NOTE(review): `input_names_format_reordered` is never defined in this
# script -- confirm where it should come from (NameError as written).
collapsed_hops_lvl7 = pd.DataFrame(collapsed_hops_lvl7_list, index = input_names_format_reordered).T
fg = sns.clustermap(collapsed_hops_lvl7.loc[order], col_cluster = False, yticklabels=False,
                    rasterized = True, figsize = (1.75, 2.5))
ax = fg.ax_heatmap
ax.set_ylabel('Individual Clusters')
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right');
fg.savefig('cascades/cluster_plots/multimodal_nature_of_clusters_lvl7.pdf', format='pdf', bbox_inches='tight')
# %%
| [
"sys.path.append",
"pandas.DataFrame",
"pymaid.CatmaidInstance",
"seaborn.heatmap",
"seaborn.clustermap",
"pandas.read_csv",
"pymaid.get_annotated",
"os.getcwd",
"connectome_tools.cascade_analysis.Cascade_Analyzer.run_cascades_parallel",
"pymaid.get_skids_by_annotation",
"numpy.where",
"numpy.... | [((259, 313), 'sys.path.append', 'sys.path.append', (['"""/Users/mwinding/repos/maggot_models"""'], {}), "('/Users/mwinding/repos/maggot_models')\n", (274, 313), False, 'import sys\n'), ((716, 766), 'pymaid.CatmaidInstance', 'pymaid.CatmaidInstance', (['url', 'token', 'name', 'password'], {}), '(url, token, name, password)\n', (738, 766), False, 'import pymaid\n'), ((1201, 1319), 'pandas.read_csv', 'pd.read_csv', (['"""cascades/data/meta-method=color_iso-d=8-bic_ratio=0.95-min_split=32.csv"""'], {'index_col': '(0)', 'header': '(0)'}), "(\n 'cascades/data/meta-method=color_iso-d=8-bic_ratio=0.95-min_split=32.csv',\n index_col=0, header=0)\n", (1212, 1319), True, 'import pandas as pd\n'), ((1508, 1532), 'numpy.array', 'np.array', (['order_delisted'], {}), '(order_delisted)\n', (1516, 1532), True, 'import numpy as np\n'), ((2377, 2569), 'connectome_tools.cascade_analysis.Cascade_Analyzer.run_cascades_parallel', 'casc.Cascade_Analyzer.run_cascades_parallel', ([], {'source_skids_list': 'input_skids_list', 'stop_skids': 'output_skids', 'adj': 'adj_ad', 'p': 'p', 'max_hops': 'max_hops', 'n_init': 'n_init', 'simultaneous': 'simultaneous'}), '(source_skids_list=\n input_skids_list, stop_skids=output_skids, adj=adj_ad, p=p, max_hops=\n max_hops, n_init=n_init, simultaneous=simultaneous)\n', (2420, 2569), True, 'import connectome_tools.cascade_analysis as casc\n'), ((2960, 3018), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_lvl7'], {'columns': "['key', 'num_cluster']"}), "(cluster_lvl7, columns=['key', 'num_cluster'])\n", (2972, 3018), True, 'import pandas as pd\n'), ((4761, 4795), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (4773, 4795), True, 'import matplotlib.pyplot as plt\n'), ((5069, 5183), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cascades/cluster_plots/all_sensory_through_clusters_lvl7.pdf"""'], {'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), 
"('cascades/cluster_plots/all_sensory_through_clusters_lvl7.pdf',\n format='pdf', bbox_inches='tight')\n", (5080, 5183), True, 'import matplotlib.pyplot as plt\n'), ((5274, 5308), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (5286, 5308), True, 'import matplotlib.pyplot as plt\n'), ((7446, 7478), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_membership'], {}), '(cluster_membership)\n', (7458, 7478), True, 'import pandas as pd\n'), ((7490, 7524), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (7502, 7524), True, 'import matplotlib.pyplot as plt\n'), ((7884, 7918), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (7896, 7918), True, 'import matplotlib.pyplot as plt\n'), ((7926, 8042), 'seaborn.heatmap', 'sns.heatmap', (['cluster_membership.iloc[:, 3:8]'], {'rasterized': '(True)', 'cbar_kws': "{'label': 'Fraction of Cluster'}", 'ax': 'ax'}), "(cluster_membership.iloc[:, 3:8], rasterized=True, cbar_kws={\n 'label': 'Fraction of Cluster'}, ax=ax)\n", (7937, 8042), True, 'import seaborn as sns\n'), ((8361, 8397), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(2)'], {'figsize': '(10, 10)'}), '(4, 2, figsize=(10, 10))\n', (8373, 8397), True, 'import matplotlib.pyplot as plt\n'), ((9137, 9247), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cascades/cluster_plots/sensory_through_clusters_lvl7.pdf"""'], {'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('cascades/cluster_plots/sensory_through_clusters_lvl7.pdf',\n format='pdf', bbox_inches='tight')\n", (9148, 9247), True, 'import matplotlib.pyplot as plt\n'), ((9334, 9370), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(2)'], {'figsize': '(10, 10)'}), '(4, 2, figsize=(10, 10))\n', (9346, 9370), True, 'import matplotlib.pyplot as plt\n'), ((10093, 10220), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['"""cascades/cluster_plots/sensory_through_clusters_lvl7_num_neurons.pdf"""'], {'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "(\n 'cascades/cluster_plots/sensory_through_clusters_lvl7_num_neurons.pdf',\n format='pdf', bbox_inches='tight')\n", (10104, 10220), True, 'import matplotlib.pyplot as plt\n'), ((10671, 10797), 'seaborn.clustermap', 'sns.clustermap', (['collapsed_hops_lvl7.loc[order]'], {'col_cluster': '(False)', 'yticklabels': '(False)', 'rasterized': '(True)', 'figsize': '(1.75, 2.5)'}), '(collapsed_hops_lvl7.loc[order], col_cluster=False,\n yticklabels=False, rasterized=True, figsize=(1.75, 2.5))\n', (10685, 10797), True, 'import seaborn as sns\n'), ((1323, 1378), 'pandas.read_csv', 'pd.read_csv', (['"""cascades/data/signal_flow_order_lvl7.csv"""'], {}), "('cascades/data/signal_flow_order_lvl7.csv')\n", (1334, 1378), True, 'import pandas as pd\n'), ((2015, 2055), 'pymaid.get_annotated', 'pymaid.get_annotated', (['"""mw brain outputs"""'], {}), "('mw brain outputs')\n", (2035, 2055), False, 'import pymaid\n'), ((3903, 3933), 'pandas.DataFrame', 'pd.DataFrame', (['sensory_sum_hist'], {}), '(sensory_sum_hist)\n', (3915, 3933), True, 'import pandas as pd\n'), ((4471, 4501), 'pandas.DataFrame', 'pd.DataFrame', (['sensory_num_hist'], {}), '(sensory_num_hist)\n', (4483, 4501), True, 'import pandas as pd\n'), ((8586, 8717), 'seaborn.heatmap', 'sns.heatmap', (['summed_hist_lvl7[i].loc[order]'], {'ax': 'ax', 'rasterized': '(True)', 'vmax': 'vmax', 'cbar_kws': "{'label': 'Average Number of Visits'}"}), "(summed_hist_lvl7[i].loc[order], ax=ax, rasterized=True, vmax=\n vmax, cbar_kws={'label': 'Average Number of Visits'})\n", (8597, 8717), True, 'import seaborn as sns\n'), ((9552, 9672), 'seaborn.heatmap', 'sns.heatmap', (['num_hist_lvl7[i].loc[order]'], {'ax': 'ax', 'rasterized': '(True)', 'vmax': 'vmax', 'cbar_kws': "{'label': 'Number of Neurons'}"}), "(num_hist_lvl7[i].loc[order], ax=ax, rasterized=True, vmax=vmax,\n cbar_kws={'label': 
'Number of Neurons'})\n", (9563, 9672), True, 'import seaborn as sns\n'), ((10586, 10660), 'pandas.DataFrame', 'pd.DataFrame', (['collapsed_hops_lvl7_list'], {'index': 'input_names_format_reordered'}), '(collapsed_hops_lvl7_list, index=input_names_format_reordered)\n', (10598, 10660), True, 'import pandas as pd\n'), ((104, 115), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (113, 115), False, 'import os\n'), ((194, 205), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (203, 205), False, 'import os\n'), ((1090, 1146), 'pandas.read_csv', 'pd.read_csv', (['f"""data/adj/all-neurons_ad.csv"""'], {'index_col': '(0)'}), "(f'data/adj/all-neurons_ad.csv', index_col=0)\n", (1101, 1146), True, 'import pandas as pd\n'), ((1814, 1858), 'pymaid.get_skids_by_annotation', 'pymaid.get_skids_by_annotation', (['f"""mw {name}"""'], {}), "(f'mw {name}')\n", (1844, 1858), False, 'import pymaid\n'), ((3363, 3404), 'pandas.DataFrame', 'pd.DataFrame', (['cluster_hist'], {'index': 'indices'}), '(cluster_hist, index=indices)\n', (3375, 3404), True, 'import pandas as pd\n'), ((2122, 2162), 'pymaid.get_annotated', 'pymaid.get_annotated', (['"""mw brain outputs"""'], {}), "('mw brain outputs')\n", (2142, 2162), False, 'import pymaid\n'), ((3250, 3297), 'numpy.where', 'np.where', (['[(x in skids) for x in mg.meta.index]'], {}), '([(x in skids) for x in mg.meta.index])\n', (3258, 3297), True, 'import numpy as np\n')] |
import os
import sys
import cv2
import numpy as np
def get_binary_img(img):
    """Binarize a grayscale image with a fixed threshold of 127.

    Parameters
    ----------
    img : np.ndarray
        Grayscale image (2-D uint8 array expected).

    Returns
    -------
    np.ndarray
        uint8 array of the same shape: 255 where ``img > 127``, else 0.
    """
    # Vectorized replacement for the original per-pixel Python double loop:
    # one C-level pass instead of O(h*w) interpreter iterations, and it
    # works for any array shape, not only 2-D.
    return np.where(img > 127, 255, 0).astype(np.uint8)
def get_vertical_project(img):
    """Build the vertical projection profile of a binary image.

    For every column the number of black (0) pixels is counted, and that
    many black pixels are drawn from the bottom of an all-white image of
    identical shape.

    Parameters
    ----------
    img : np.ndarray
        2-D binary image where black pixels are 0.

    Returns
    -------
    np.ndarray
        uint8 projection image of the same shape.
    """
    h, w = img.shape
    # Black-pixel count per column in one vectorized pass
    # (replaces the original O(h*w) Python double loop).
    counts = np.count_nonzero(img == 0, axis=0)
    rows = np.arange(h).reshape(h, 1)
    # Cell (i, j) is black iff it lies within the bottom counts[j] rows.
    return np.where(rows >= h - counts, 0, 255).astype(np.uint8)
def get_horizon_project(img):
    """Build the horizontal projection profile of a binary image.

    For every row the number of black (0) pixels is counted, and that
    many black pixels are drawn from the left of an all-white image of
    identical shape.

    Parameters
    ----------
    img : np.ndarray
        2-D binary image where black pixels are 0.

    Returns
    -------
    np.ndarray
        uint8 projection image of the same shape.
    """
    h, w = img.shape
    # Black-pixel count per row in one vectorized pass
    # (replaces the original O(h*w) Python double loop).
    counts = np.count_nonzero(img == 0, axis=1)
    cols = np.arange(w).reshape(1, w)
    # Cell (i, j) is black iff it lies within the leftmost counts[i] columns.
    return np.where(cols < counts.reshape(h, 1), 0, 255).astype(np.uint8)
def get_vertical_project_update(img):
    """Vertical projection plus the first horizontal content run ``[start, end)``.

    Parameters
    ----------
    img : np.ndarray
        2-D binary image where black pixels are 0.

    Returns
    -------
    tuple
        ``(project_img, start, end)`` — ``project_img`` is the vertical
        projection (black-pixel count per column drawn from the bottom);
        ``start`` is the first column containing black pixels; ``end`` is
        the first subsequent all-white column (exclusive end of the run).
        Bug fix: if the content run reaches the right edge, ``end`` is now
        ``w`` (the original left it at 0).
    """
    h, w = img.shape
    # Black-pixel count per column, vectorized (was an O(h*w) Python loop).
    counts = np.count_nonzero(img == 0, axis=0)
    rows = np.arange(h).reshape(h, 1)
    # Cell (i, j) is black iff it lies within the bottom counts[j] rows.
    project_img = np.where(rows >= h - counts, 0, 255).astype(np.uint8)
    start = end = 0
    find_start = find_end = False
    pre_num = 0
    for j in range(w):
        num = int(counts[j])
        # First transition white -> content marks the start column.
        if not find_start and pre_num == 0 and num != pre_num:
            start = j
            find_start = True
        # First transition content -> white marks the (exclusive) end column.
        if not find_end and num == 0 and num != pre_num:
            end = j
            find_end = True
        pre_num = num
    if find_start and not find_end:
        # Content extends to the last column: report an exclusive end of w.
        end = w
    return project_img, start, end
def get_horizon_project_update(img):
    """Horizontal projection plus the first vertical content run ``[start, end)``.

    Parameters
    ----------
    img : np.ndarray
        2-D binary image where black pixels are 0.

    Returns
    -------
    tuple
        ``(project_img, start, end)`` — ``project_img`` is the horizontal
        projection (black-pixel count per row drawn from the left);
        ``start`` is the first row containing black pixels; ``end`` is the
        first subsequent all-white row (exclusive end of the run).
        Bug fix: if the content run reaches the bottom edge, ``end`` is now
        ``h`` (the original left it at 0).
    """
    h, w = img.shape
    # Black-pixel count per row, vectorized (was an O(h*w) Python loop).
    counts = np.count_nonzero(img == 0, axis=1)
    cols = np.arange(w).reshape(1, w)
    # Cell (i, j) is black iff it lies within the leftmost counts[i] columns.
    project_img = np.where(cols < counts.reshape(h, 1), 0, 255).astype(np.uint8)
    start = end = 0
    find_start = find_end = False
    pre_num = 0
    for i in range(h):
        num = int(counts[i])
        # First transition white -> content marks the start row.
        if not find_start and pre_num == 0 and num != pre_num:
            start = i
            find_start = True
        # First transition content -> white marks the (exclusive) end row.
        if not find_end and num == 0 and num != pre_num:
            end = i
            find_end = True
        pre_num = num
    if find_start and not find_end:
        # Content extends to the last row: report an exclusive end of h.
        end = h
    return project_img, start, end
def test1():
    """Demo: binarize a sample image and display both projection profiles."""
    src = cv2.imread("./sample/test2.png")
    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    # Binarize, then build the two projection images.
    binary = get_binary_img(gray)
    horizon = get_horizon_project(binary)
    vertical = get_vertical_project(binary)
    # Display until a key is pressed.
    cv2.imshow("bin", binary)
    cv2.imshow("horizon", horizon)
    cv2.imshow("vertical", vertical)
    cv2.waitKey(0)
def draw_horizon(img, start_i, end_i):
    """Mark rows ``start_i`` and ``end_i`` with red horizontal lines.

    Parameters
    ----------
    img : np.ndarray
        3-channel (BGR) image; a copy is drawn on and returned.
    start_i, end_i : int
        Row indices of the detected content boundaries.

    Returns
    -------
    np.ndarray
        Copy of ``img`` with the two lines drawn.
    """
    out_img = img.copy()
    h, w, c = img.shape
    # Bug fix: a horizontal line must span the image *width*, so the x
    # endpoint is w - 1. The original used h - 1 (the height), which
    # truncated or overshot the line on non-square images.
    cv2.line(out_img, (0, start_i), (w - 1, start_i), color=(0, 0, 255), thickness=2)
    cv2.line(out_img, (0, end_i), (w - 1, end_i), color=(0, 0, 255), thickness=2)
    return out_img
def draw_vertical(img, start_j, end_j):
    """Mark columns ``start_j`` and ``end_j`` with red vertical lines on a copy of ``img``."""
    marked = img.copy()
    h, w, c = img.shape
    # Each vertical line runs the full image height at its column.
    for col in (start_j, end_j):
        cv2.line(marked, (col, 0), (col, h - 1), color=(0, 0, 255), thickness=2)
    return marked
def test2():
    """Demo: locate the content bounding box via projection profiles, show and save results."""
    file_name = "/media/zhaoqichao/92A8C550A8C5340F/wechat/20210818/test2.png"
    src = cv2.imread(file_name)
    gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    # Binarize and compute both projections plus the first content run on each axis.
    binary = get_binary_img(gray)
    horizon, start_i, end_i = get_horizon_project_update(binary)
    vertical, start_j, end_j = get_vertical_project_update(binary)
    # Draw the detected bounding box on a copy of the input.
    boxed = src.copy()
    cv2.rectangle(boxed, (start_j, start_i), (end_j, end_i), color=(0, 0, 255), thickness=2)
    cv2.imshow("bin", binary)
    cv2.imshow("horizon", horizon)
    cv2.imshow("vertical", vertical)
    # Promote single-channel images to 3 channels so red lines can be drawn.
    bin3 = cv2.merge([binary, binary, binary])
    horizon3 = cv2.merge([horizon, horizon, horizon])
    vertical3 = cv2.merge([vertical, vertical, vertical])
    out_bin_horizon = draw_horizon(bin3, start_i, end_i)
    out_horizon = draw_horizon(horizon3, start_i, end_i)
    out_bin_vertical = draw_vertical(bin3, start_j, end_j)
    out_vertical = draw_vertical(vertical3, start_j, end_j)
    for title, image in (("res", boxed),
                         ("out_bin_horizon", out_bin_horizon),
                         ("out_horizon", out_horizon),
                         ("out_bin_vertical", out_bin_vertical),
                         ("out_vertical", out_vertical)):
        cv2.imshow(title, image)
    for path, image in (("out_bin_horizon.jpg", out_bin_horizon),
                        ("out_horizon.jpg", out_horizon),
                        ("out_bin_vertical.jpg", out_bin_vertical),
                        ("out_vertical.jpg", out_vertical),
                        ("res.jpg", boxed)):
        cv2.imwrite(path, image)
    out5 = cv2.hconcat([bin3, out_horizon, out_vertical, boxed])
    cv2.imwrite("out5.jpg", out5)
    cv2.waitKey(0)
if __name__ == "__main__":
    # test1 shows plain projections; test2 adds the bounding-box demo.
    #test1()
    test2()
| [
"cv2.line",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread",
"cv2.hconcat",
"cv2.rectangle",
"cv2.merge",
"cv2.imshow"
] | [((120, 161), 'numpy.zeros', 'np.zeros', ([], {'shape': 'img.shape', 'dtype': 'np.uint8'}), '(shape=img.shape, dtype=np.uint8)\n', (128, 161), True, 'import numpy as np\n'), ((2354, 2375), 'cv2.imread', 'cv2.imread', (['file_name'], {}), '(file_name)\n', (2364, 2375), False, 'import cv2\n'), ((2391, 2428), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2403, 2428), False, 'import cv2\n'), ((2586, 2612), 'cv2.imshow', 'cv2.imshow', (['"""bin"""', 'bin_img'], {}), "('bin', bin_img)\n", (2596, 2612), False, 'import cv2\n'), ((2617, 2651), 'cv2.imshow', 'cv2.imshow', (['"""horizon"""', 'horizon_img'], {}), "('horizon', horizon_img)\n", (2627, 2651), False, 'import cv2\n'), ((2656, 2692), 'cv2.imshow', 'cv2.imshow', (['"""vertical"""', 'vertical_img'], {}), "('vertical', vertical_img)\n", (2666, 2692), False, 'import cv2\n'), ((2697, 2711), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2708, 2711), False, 'import cv2\n'), ((2802, 2887), 'cv2.line', 'cv2.line', (['out_img', '(0, start_i)', '(h - 1, start_i)'], {'color': '(0, 0, 255)', 'thickness': '(2)'}), '(out_img, (0, start_i), (h - 1, start_i), color=(0, 0, 255),\n thickness=2)\n', (2810, 2887), False, 'import cv2\n'), ((2885, 2962), 'cv2.line', 'cv2.line', (['out_img', '(0, end_i)', '(h - 1, end_i)'], {'color': '(0, 0, 255)', 'thickness': '(2)'}), '(out_img, (0, end_i), (h - 1, end_i), color=(0, 0, 255), thickness=2)\n', (2893, 2962), False, 'import cv2\n'), ((3074, 3159), 'cv2.line', 'cv2.line', (['out_img', '(start_j, 0)', '(start_j, h - 1)'], {'color': '(0, 0, 255)', 'thickness': '(2)'}), '(out_img, (start_j, 0), (start_j, h - 1), color=(0, 0, 255),\n thickness=2)\n', (3082, 3159), False, 'import cv2\n'), ((3160, 3237), 'cv2.line', 'cv2.line', (['out_img', '(end_j, 0)', '(end_j, h - 1)'], {'color': '(0, 0, 255)', 'thickness': '(2)'}), '(out_img, (end_j, 0), (end_j, h - 1), color=(0, 0, 255), thickness=2)\n', (3168, 3237), False, 'import cv2\n'), 
((3361, 3382), 'cv2.imread', 'cv2.imread', (['file_name'], {}), '(file_name)\n', (3371, 3382), False, 'import cv2\n'), ((3398, 3435), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3410, 3435), False, 'import cv2\n'), ((3661, 3757), 'cv2.rectangle', 'cv2.rectangle', (['show_img', '(start_j, start_i)', '(end_j, end_i)'], {'color': '(0, 0, 255)', 'thickness': '(2)'}), '(show_img, (start_j, start_i), (end_j, end_i), color=(0, 0, \n 255), thickness=2)\n', (3674, 3757), False, 'import cv2\n'), ((3766, 3792), 'cv2.imshow', 'cv2.imshow', (['"""bin"""', 'bin_img'], {}), "('bin', bin_img)\n", (3776, 3792), False, 'import cv2\n'), ((3797, 3831), 'cv2.imshow', 'cv2.imshow', (['"""horizon"""', 'horizon_img'], {}), "('horizon', horizon_img)\n", (3807, 3831), False, 'import cv2\n'), ((3836, 3872), 'cv2.imshow', 'cv2.imshow', (['"""vertical"""', 'vertical_img'], {}), "('vertical', vertical_img)\n", (3846, 3872), False, 'import cv2\n'), ((3899, 3937), 'cv2.merge', 'cv2.merge', (['[bin_img, bin_img, bin_img]'], {}), '([bin_img, bin_img, bin_img])\n', (3908, 3937), False, 'import cv2\n'), ((3958, 4008), 'cv2.merge', 'cv2.merge', (['[horizon_img, horizon_img, horizon_img]'], {}), '([horizon_img, horizon_img, horizon_img])\n', (3967, 4008), False, 'import cv2\n'), ((4030, 4083), 'cv2.merge', 'cv2.merge', (['[vertical_img, vertical_img, vertical_img]'], {}), '([vertical_img, vertical_img, vertical_img])\n', (4039, 4083), False, 'import cv2\n'), ((4342, 4369), 'cv2.imshow', 'cv2.imshow', (['"""res"""', 'show_img'], {}), "('res', show_img)\n", (4352, 4369), False, 'import cv2\n'), ((4374, 4420), 'cv2.imshow', 'cv2.imshow', (['"""out_bin_horizon"""', 'out_bin_horizon'], {}), "('out_bin_horizon', out_bin_horizon)\n", (4384, 4420), False, 'import cv2\n'), ((4425, 4463), 'cv2.imshow', 'cv2.imshow', (['"""out_horizon"""', 'out_horizon'], {}), "('out_horizon', out_horizon)\n", (4435, 4463), False, 'import cv2\n'), ((4468, 4516), 
'cv2.imshow', 'cv2.imshow', (['"""out_bin_vertical"""', 'out_bin_vertical'], {}), "('out_bin_vertical', out_bin_vertical)\n", (4478, 4516), False, 'import cv2\n'), ((4521, 4561), 'cv2.imshow', 'cv2.imshow', (['"""out_vertical"""', 'out_vertical'], {}), "('out_vertical', out_vertical)\n", (4531, 4561), False, 'import cv2\n'), ((4567, 4618), 'cv2.imwrite', 'cv2.imwrite', (['"""out_bin_horizon.jpg"""', 'out_bin_horizon'], {}), "('out_bin_horizon.jpg', out_bin_horizon)\n", (4578, 4618), False, 'import cv2\n'), ((4623, 4666), 'cv2.imwrite', 'cv2.imwrite', (['"""out_horizon.jpg"""', 'out_horizon'], {}), "('out_horizon.jpg', out_horizon)\n", (4634, 4666), False, 'import cv2\n'), ((4671, 4724), 'cv2.imwrite', 'cv2.imwrite', (['"""out_bin_vertical.jpg"""', 'out_bin_vertical'], {}), "('out_bin_vertical.jpg', out_bin_vertical)\n", (4682, 4724), False, 'import cv2\n'), ((4729, 4774), 'cv2.imwrite', 'cv2.imwrite', (['"""out_vertical.jpg"""', 'out_vertical'], {}), "('out_vertical.jpg', out_vertical)\n", (4740, 4774), False, 'import cv2\n'), ((4779, 4811), 'cv2.imwrite', 'cv2.imwrite', (['"""res.jpg"""', 'show_img'], {}), "('res.jpg', show_img)\n", (4790, 4811), False, 'import cv2\n'), ((4824, 4885), 'cv2.hconcat', 'cv2.hconcat', (['[merge_bin, out_horizon, out_vertical, show_img]'], {}), '([merge_bin, out_horizon, out_vertical, show_img])\n', (4835, 4885), False, 'import cv2\n'), ((4888, 4917), 'cv2.imwrite', 'cv2.imwrite', (['"""out5.jpg"""', 'out5'], {}), "('out5.jpg', out5)\n", (4899, 4917), False, 'import cv2\n'), ((4922, 4936), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4933, 4936), False, 'import cv2\n'), ((406, 447), 'numpy.zeros', 'np.zeros', ([], {'shape': 'img.shape', 'dtype': 'np.uint8'}), '(shape=img.shape, dtype=np.uint8)\n', (414, 447), True, 'import numpy as np\n'), ((738, 779), 'numpy.zeros', 'np.zeros', ([], {'shape': 'img.shape', 'dtype': 'np.uint8'}), '(shape=img.shape, dtype=np.uint8)\n', (746, 779), True, 'import numpy as np\n'), ((1075, 1116), 
'numpy.zeros', 'np.zeros', ([], {'shape': 'img.shape', 'dtype': 'np.uint8'}), '(shape=img.shape, dtype=np.uint8)\n', (1083, 1116), True, 'import numpy as np\n'), ((1725, 1766), 'numpy.zeros', 'np.zeros', ([], {'shape': 'img.shape', 'dtype': 'np.uint8'}), '(shape=img.shape, dtype=np.uint8)\n', (1733, 1766), True, 'import numpy as np\n')] |
import warnings
from typing import TYPE_CHECKING
from typing import Dict
from typing import List
from typing import Optional
import numpy as np
import pandas as pd
from etna.clustering.distances.base import Distance
from etna.core import BaseMixin
from etna.loggers import tslogger
if TYPE_CHECKING:
from etna.datasets import TSDataset
class DistanceMatrix(BaseMixin):
    """DistanceMatrix computes distance matrix from TSDataset."""

    def __init__(self, distance: Distance):
        """Init DistanceMatrix.

        Parameters
        ----------
        distance:
            class for distance measurement
        """
        self.distance = distance
        # Pairwise distance matrix; filled by ``fit``.
        self.matrix: Optional[np.ndarray] = None
        # Per-segment target series; filled by ``fit``.
        self.series: Optional[List[np.ndarray]] = None
        # Bidirectional mapping between segment names and matrix indices.
        self.segment2idx: Dict[str, int] = {}
        self.idx2segment: Dict[int, str] = {}
        self.series_number: Optional[int] = None

    @staticmethod
    def _validate_dataset(ts: "TSDataset"):
        """Check that dataset does not contain NaNs; warn once if it does."""
        for segment in ts.segments:
            series = ts[:, segment, "target"]
            last_valid_index = series.reset_index(drop=True).last_valid_index()
            # ``last_valid_index`` is None for an all-NaN series; the
            # original crashed here with a TypeError (None arithmetic),
            # now it is reported like any other NaN case.
            # A series without NaNs has dropna-length last_valid_index + 1
            # (leading/internal NaNs make it shorter).
            if last_valid_index is None or len(series.dropna()) != last_valid_index + 1:
                warnings.warn(
                    "Timeseries contains NaN values, which will be dropped. "
                    "If it is not desirable behaviour, handle them manually."
                )
                break

    def _get_series(self, ts: "TSDataset") -> List[pd.Series]:
        """Parse given TSDataset and get timestamp-indexed segment series.

        Build mapping from segment to idx in matrix and vice versa.
        """
        series_list = []
        for i, segment in enumerate(ts.segments):
            self.segment2idx[segment] = i
            self.idx2segment[i] = segment
            series = ts[:, segment, "target"].dropna()
            series_list.append(series)
        self.series_number = len(series_list)
        return series_list

    def _compute_dist(self, series: List[pd.Series], idx: int) -> np.ndarray:
        """Compute distance from idx-th series to other ones (one matrix row)."""
        if self.series_number is None:
            raise ValueError("Something went wrong during getting the series from dataset!")
        return np.array([self.distance(series[idx], series[j]) for j in range(self.series_number)])

    def _compute_dist_matrix(self, series: List[pd.Series]) -> np.ndarray:
        """Compute the full pairwise distance matrix for given series."""
        if self.series_number is None:
            raise ValueError("Something went wrong during getting the series from dataset!")
        distances = np.empty(shape=(self.series_number, self.series_number))
        # Log progress roughly every 10% of rows (at least every row).
        logging_freq = max(1, self.series_number // 10)
        tslogger.log("Calculating distance matrix...")
        for idx in range(self.series_number):
            distances[idx] = self._compute_dist(series=series, idx=idx)
            if (idx + 1) % logging_freq == 0:
                tslogger.log(f"Done {idx + 1} out of {self.series_number} ")
        return distances

    def fit(self, ts: "TSDataset") -> "DistanceMatrix":
        """Fit distance matrix: get timeseries from ts and compute pairwise distances.

        Parameters
        ----------
        ts:
            TSDataset with timeseries

        Returns
        -------
        self:
            fitted DistanceMatrix object
        """
        self._validate_dataset(ts)
        self.series = self._get_series(ts)
        self.matrix = self._compute_dist_matrix(self.series)
        return self

    def predict(self) -> np.ndarray:
        """Get distance matrix.

        Returns
        -------
        np.ndarray:
            2D array with distances between series

        Raises
        ------
        ValueError:
            if the matrix has not been fitted yet
        """
        if self.matrix is None:
            raise ValueError("DistanceMatrix is not fitted! Fit the DistanceMatrix before calling predict method!")
        return self.matrix

    def fit_predict(self, ts: "TSDataset") -> np.ndarray:
        """Compute distance matrix and return it.

        Parameters
        ----------
        ts:
            TSDataset with timeseries to compute matrix with

        Returns
        -------
        np.ndarray:
            2D array with distances between series
        """
        return self.fit(ts).predict()
__all__ = ["DistanceMatrix"]
| [
"numpy.empty",
"etna.loggers.tslogger.log",
"warnings.warn"
] | [((2814, 2870), 'numpy.empty', 'np.empty', ([], {'shape': '(self.series_number, self.series_number)'}), '(shape=(self.series_number, self.series_number))\n', (2822, 2870), True, 'import numpy as np\n'), ((2935, 2982), 'etna.loggers.tslogger.log', 'tslogger.log', (['f"""Calculating distance matrix..."""'], {}), "(f'Calculating distance matrix...')\n", (2947, 2982), False, 'from etna.loggers import tslogger\n'), ((1365, 1503), 'warnings.warn', 'warnings.warn', (['f"""Timeseries contains NaN values, which will be dropped. If it is not desirable behaviour, handle them manually."""'], {}), "(\n f'Timeseries contains NaN values, which will be dropped. If it is not desirable behaviour, handle them manually.'\n )\n", (1378, 1503), False, 'import warnings\n'), ((3163, 3223), 'etna.loggers.tslogger.log', 'tslogger.log', (['f"""Done {idx + 1} out of {self.series_number} """'], {}), "(f'Done {idx + 1} out of {self.series_number} ')\n", (3175, 3223), False, 'from etna.loggers import tslogger\n')] |
try:
import debug_settings
except:
pass
import unittest
import torch
import os
import numpy as np
import gym
from gym import spaces
import matplotlib
import time
from torch import nn
from torch.optim import Adam
from torch.autograd import Variable
# BARK imports
from bark.runtime.commons.parameters import ParameterServer
# BARK-ML imports
from bark_ml.environments.blueprints import \
DiscreteHighwayBlueprint, DiscreteMergingBlueprint
from bark_ml.environments.single_agent_runtime import SingleAgentRuntime
import bark_ml.environments.gym
from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.agent import FQFAgent, IQNAgent
from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.model import IQN
from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.network import initialize_weights_he
from bark_ml.observers.nearest_state_observer import NearestAgentsObserver
from bark_ml.behaviors.discrete_behavior import BehaviorDiscreteMacroActionsML
from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils import calculate_expert_loss,\
calculate_supervised_classification_quantile_loss, calculate_huber_loss, \
evaluate_quantile_at_action, get_margin_loss, update_params
def set_grad(var):
  """Return a backward hook that stashes the incoming gradient on ``var``.

  The returned callable assigns its argument to ``var.grad`` and returns
  ``None``, matching the contract expected by ``Tensor.register_hook``.
  """
  def _store(grad):
    var.grad = grad
  return _store
class TestDQN(nn.Module):
  """Tiny state-embedding network used as the DQN feature extractor in tests."""

  def __init__(self, num_channels, hidden=4, embedding_dim=1):
    super(TestDQN, self).__init__()
    layers = [
        torch.nn.Linear(num_channels, hidden),
        torch.nn.Linear(hidden, embedding_dim),
    ]
    # He initialization, mirroring the production feature extractor.
    self.net = nn.Sequential(*layers).apply(initialize_weights_he)
    self.embedding_dim = embedding_dim

  def forward(self, states):
    """Embed a batch of states into shape (batch_size, embedding_dim)."""
    embedding = self.net(states)
    assert embedding.shape == (states.shape[0], self.embedding_dim)
    return embedding
class TestCosineEmbeddingNet(nn.Module):
  """Embeds quantile fractions tau through a cosine basis (test-sized IQN piece)."""

  def __init__(self, num_cosines=4, embedding_dim=1, noisy_net=False):
    super(TestCosineEmbeddingNet, self).__init__()
    self.net = nn.Sequential(nn.Linear(num_cosines, embedding_dim), nn.ReLU())
    self.num_cosines = num_cosines
    self.embedding_dim = embedding_dim

  def forward(self, taus):
    """Map taus of shape (batch, N) to embeddings of shape (batch, N, embedding_dim)."""
    batch_size, N = taus.shape
    # i * pi for i = 1..num_cosines, shaped for broadcasting against taus.
    i_pi = np.pi * torch.arange(
        start=1, end=self.num_cosines + 1, dtype=taus.dtype,
        device=taus.device).view(1, 1, self.num_cosines)
    # cos(i * pi * tau), flattened so each (sample, tau) pair is one row.
    cosines = torch.cos(taus.view(batch_size, N, 1) * i_pi).view(
        batch_size * N, self.num_cosines)
    # Project the cosine features and restore the (batch, N, dim) layout.
    return self.net(cosines).view(batch_size, N, self.embedding_dim)
class TestQuantileNet(nn.Module):
  """Maps (state embedding, tau embedding) pairs to per-action quantile values."""

  def __init__(self, num_actions, embedding_dim=1, noisy_net=False):
    super(TestQuantileNet, self).__init__()
    self.net = nn.Sequential(
        nn.Linear(embedding_dim, 4),
        nn.ReLU(),
        nn.Linear(4, num_actions),
    )
    self.num_actions = num_actions
    self.embedding_dim = embedding_dim
    self.noisy_net = noisy_net

  def forward(self, state_embeddings, tau_embeddings):
    """Return quantile values of shape (batch, N, num_actions).

    N is the number of tau samples in ``tau_embeddings`` (not necessarily
    the network's configured N).
    """
    assert state_embeddings.shape[0] == tau_embeddings.shape[0]
    assert state_embeddings.shape[1] == tau_embeddings.shape[2]
    batch_size = state_embeddings.shape[0]
    N = tau_embeddings.shape[1]
    # Combine state and tau embeddings by elementwise product (IQN-style),
    # then flatten so each (sample, tau) pair becomes one row.
    combined = (
        state_embeddings.view(batch_size, 1, self.embedding_dim) * tau_embeddings
    ).view(batch_size * N, self.embedding_dim)
    return self.net(combined).view(batch_size, N, self.num_actions)
class TestIQN(IQN):
  """IQN variant whose sub-networks are replaced with the tiny test versions."""

  def __init__(self, num_channels, num_actions, params, num_cosines, noisy_net):
    super(TestIQN, self).__init__(num_channels, num_actions, params, num_cosines, noisy_net)
    # Fixed quantile sample counts used by the tests.
    self.K = self.N = self.N_dash = 64
    self.embedding_dim = 1
    # Swap in the small test sub-networks: feature extractor,
    # cosine tau embedding, and quantile head.
    self.dqn_net = TestDQN(num_channels=num_channels,
                           embedding_dim=self.embedding_dim,
                           hidden=4)
    self.cosine_net = TestCosineEmbeddingNet(num_cosines=num_cosines,
                                            embedding_dim=self.embedding_dim,
                                            noisy_net=noisy_net)
    self.quantile_net = TestQuantileNet(num_actions=num_actions,
                                        embedding_dim=self.embedding_dim,
                                        noisy_net=noisy_net)
class LossTests(unittest.TestCase):
  """Unit tests for the quantile-regression and expert (demonstration) losses."""

  def test_quantile_huber_loss(self):
    """Huber loss is 0 for zero TD error and kappa-linear beyond kappa."""
    td_errors = torch.zeros((1, 2, 1))
    td_errors[:, 0, :] = 0.0
    td_errors[:, 1, :] = 2.0
    taus = torch.rand(1, 2)
    kappa = 1.0
    quantile_huber_loss = calculate_huber_loss(td_errors, kappa=kappa).squeeze()
    # Zero error -> zero loss; error beyond kappa -> linear branch
    # kappa * (|err| - kappa / 2).
    assert quantile_huber_loss[0] == 0.0
    assert quantile_huber_loss[1] == kappa * (td_errors[0, 1, 0] - 0.5 * kappa)

  def test_supervised_margin_loss(self):
    """The packaged supervised loss equals a manual recomputation from its parts."""
    expert_margin = 0.8
    supervised_loss_weight = 0.5
    num_actions = 2
    batch_size = 1
    state_size = 1
    params = ParameterServer()
    states = torch.rand((batch_size, state_size))
    next_states = torch.rand((batch_size, state_size))
    # Deterministic policy for the fixture: action 1 iff state >= 0.5;
    # such samples are marked as demonstrations.
    actions = torch.zeros((batch_size, 1), dtype=torch.int64)
    actions[states >= 0.5] = 1.0
    is_demos = torch.zeros((batch_size, 1))
    is_demos[(actions.squeeze() == 1.0).nonzero()] = 1.0
    state_shape = spaces.Box(low=np.zeros(state_size), high=np.zeros(state_size))
    test_iqn = TestIQN(num_channels=state_shape.shape[0], num_actions=num_actions, params=params,
                      num_cosines=4, noisy_net=False)
    taus = torch.rand(batch_size, test_iqn.N)
    state_embeddings = test_iqn.dqn_net(states)
    next_state_embeddings = test_iqn.dqn_net(next_states)
    # Loss as computed by the library helper under test.
    supervised_classification_loss = calculate_supervised_classification_quantile_loss(actions,
      states, test_iqn, taus, state_embeddings, next_state_embeddings, is_demos,
      num_actions, 'cpu', supervised_loss_weight, expert_margin)
    # Recompute the same quantity step by step from the building blocks.
    resampled_batch_margin_loss = get_margin_loss(actions, num_actions, is_demos, expert_margin, 'cpu')
    recalculated_quantiles = test_iqn.calculate_quantiles(taus, state_embeddings=state_embeddings)
    recalculated_q = recalculated_quantiles.mean(dim=1)
    recalculated_loss = calculate_expert_loss(recalculated_q, resampled_batch_margin_loss, is_demos,
      actions, supervised_loss_weight * is_demos.squeeze())
    assert recalculated_loss.mean () == supervised_classification_loss

  def test_supervised_margin_loss_zero_states(self):
    """All-zero states must produce all-zero embeddings (no bias path)."""
    params = ParameterServer()
    states = torch.zeros((1, 4))
    state_shape = spaces.Box(low=np.zeros(4), high=np.zeros(4))
    test_iqn = TestIQN(num_channels=state_shape.shape[0], num_actions=2, params=params,
                      num_cosines=4, noisy_net=False)
    state_embeddings = test_iqn.dqn_net(states)
    assert(torch.all(state_embeddings == 0.0))

  def test_supervised_margin_loss_states(self):
    """After a short training loop on self-generated demos, the supervised loss reaches 0."""
    num_actions = 2
    params = ParameterServer()
    batch_size = 512
    state_size = 1
    state_shape = spaces.Box(low=np.zeros(state_size), high=np.zeros(state_size))
    online_iqn = TestIQN(num_channels=state_shape.shape[0], num_actions=num_actions, params=params,
                        num_cosines=4, noisy_net=False)
    target_iqn = TestIQN(num_channels=state_shape.shape[0], num_actions=num_actions, params=params,
                        num_cosines=4, noisy_net=False)
    optim = Adam(online_iqn.parameters(),
                lr=5.5e-3,
                eps=1e-2 / batch_size)
    online_iqn.train()
    target_iqn.train()
    states = torch.rand((batch_size, state_size))
    # Demonstration policy: action 1 iff state >= 0.5; those samples are demos.
    actions = torch.zeros((batch_size, 1), dtype=torch.int64)
    actions[states >= 0.5] = 1.0
    online_iqn.sample_noise()
    loss = Variable(requires_grad=True)
    is_demos = torch.zeros((batch_size, 1))
    is_demos[(actions.squeeze() == 1.0).nonzero()] = 1.0
    for i in range(100):
      is_demos[(actions.squeeze() == 1.0).nonzero()] = 1.0
      next_states = torch.rand((batch_size, state_size))
      state_embeddings = online_iqn.dqn_net(states)
      next_state_embeddings = target_iqn.dqn_net(states=next_states)
      # sample tau random quantiles from online network
      taus = torch.rand(batch_size, 4)
      current_sa_quantiles = evaluate_quantile_at_action(
        online_iqn.calculate_quantiles(taus,
                                       state_embeddings=state_embeddings),
        actions
      )
      current_q_values = online_iqn.calculate_q(states=states)
      online_iqn.sample_noise()
      # Greedy action selection from the online network (double-DQN style).
      next_q = online_iqn.calculate_q(states=next_states)
      next_actions = torch.argmax(next_q, dim=1, keepdim=True)
      tau_dashes = torch.rand(batch_size, 4)
      target_sa_quantiles = evaluate_quantile_at_action(
        target_iqn.calculate_quantiles(
          tau_dashes, next_state_embeddings
        ), next_actions
      ).transpose(1, 2)
      td_errors = target_sa_quantiles - current_sa_quantiles
      supervised_classification_loss = calculate_supervised_classification_quantile_loss(
        actions, states, online_iqn, tau_dashes, state_embeddings, next_state_embeddings, is_demos,
        num_actions, 'cpu', 0.5, 0.8
      )
      # Only the supervised classification term is optimized here.
      loss = supervised_classification_loss
      gradients = update_params(optim, loss, [online_iqn], retain_graph=True, count=i)
      states = next_states
      actions = next_actions
      # Periodically sync the target network with the online network.
      if i % 25 == 0:
        target_iqn.load_state_dict(online_iqn.state_dict())
    assert loss == 0.0
if __name__ == "__main__":
  # Run all LossTests through the unittest CLI runner.
  unittest.main()
"unittest.main",
"bark.runtime.commons.parameters.ParameterServer",
"torch.nn.ReLU",
"bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils.update_params",
"torch.autograd.Variable",
"torch.argmax",
"bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils.calculate_huber_loss",
"numpy.zeros",
"bark_ml.library_w... | [((10592, 10607), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10605, 10607), False, 'import unittest\n'), ((5138, 5160), 'torch.zeros', 'torch.zeros', (['(1, 2, 1)'], {}), '((1, 2, 1))\n', (5149, 5160), False, 'import torch\n'), ((5242, 5258), 'torch.rand', 'torch.rand', (['(1)', '(2)'], {}), '(1, 2)\n', (5252, 5258), False, 'import torch\n'), ((5689, 5706), 'bark.runtime.commons.parameters.ParameterServer', 'ParameterServer', ([], {}), '()\n', (5704, 5706), False, 'from bark.runtime.commons.parameters import ParameterServer\n'), ((5724, 5760), 'torch.rand', 'torch.rand', (['(batch_size, state_size)'], {}), '((batch_size, state_size))\n', (5734, 5760), False, 'import torch\n'), ((5783, 5819), 'torch.rand', 'torch.rand', (['(batch_size, state_size)'], {}), '((batch_size, state_size))\n', (5793, 5819), False, 'import torch\n'), ((5838, 5885), 'torch.zeros', 'torch.zeros', (['(batch_size, 1)'], {'dtype': 'torch.int64'}), '((batch_size, 1), dtype=torch.int64)\n', (5849, 5885), False, 'import torch\n'), ((5942, 5970), 'torch.zeros', 'torch.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (5953, 5970), False, 'import torch\n'), ((6295, 6329), 'torch.rand', 'torch.rand', (['batch_size', 'test_iqn.N'], {}), '(batch_size, test_iqn.N)\n', (6305, 6329), False, 'import torch\n'), ((6487, 6687), 'bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils.calculate_supervised_classification_quantile_loss', 'calculate_supervised_classification_quantile_loss', (['actions', 'states', 'test_iqn', 'taus', 'state_embeddings', 'next_state_embeddings', 'is_demos', 'num_actions', '"""cpu"""', 'supervised_loss_weight', 'expert_margin'], {}), "(actions, states, test_iqn,\n taus, state_embeddings, next_state_embeddings, is_demos, num_actions,\n 'cpu', supervised_loss_weight, expert_margin)\n", (6536, 6687), False, 'from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils import calculate_expert_loss, calculate_supervised_classification_quantile_loss, 
calculate_huber_loss, evaluate_quantile_at_action, get_margin_loss, update_params\n'), ((6739, 6808), 'bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils.get_margin_loss', 'get_margin_loss', (['actions', 'num_actions', 'is_demos', 'expert_margin', '"""cpu"""'], {}), "(actions, num_actions, is_demos, expert_margin, 'cpu')\n", (6754, 6808), False, 'from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils import calculate_expert_loss, calculate_supervised_classification_quantile_loss, calculate_huber_loss, evaluate_quantile_at_action, get_margin_loss, update_params\n'), ((7331, 7348), 'bark.runtime.commons.parameters.ParameterServer', 'ParameterServer', ([], {}), '()\n', (7346, 7348), False, 'from bark.runtime.commons.parameters import ParameterServer\n'), ((7366, 7385), 'torch.zeros', 'torch.zeros', (['(1, 4)'], {}), '((1, 4))\n', (7377, 7385), False, 'import torch\n'), ((7673, 7707), 'torch.all', 'torch.all', (['(state_embeddings == 0.0)'], {}), '(state_embeddings == 0.0)\n', (7682, 7707), False, 'import torch\n'), ((7805, 7822), 'bark.runtime.commons.parameters.ParameterServer', 'ParameterServer', ([], {}), '()\n', (7820, 7822), False, 'from bark.runtime.commons.parameters import ParameterServer\n'), ((8482, 8518), 'torch.rand', 'torch.rand', (['(batch_size, state_size)'], {}), '((batch_size, state_size))\n', (8492, 8518), False, 'import torch\n'), ((8537, 8584), 'torch.zeros', 'torch.zeros', (['(batch_size, 1)'], {'dtype': 'torch.int64'}), '((batch_size, 1), dtype=torch.int64)\n', (8548, 8584), False, 'import torch\n'), ((8671, 8699), 'torch.autograd.Variable', 'Variable', ([], {'requires_grad': '(True)'}), '(requires_grad=True)\n', (8679, 8699), False, 'from torch.autograd import Variable\n'), ((8720, 8748), 'torch.zeros', 'torch.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (8731, 8748), False, 'import torch\n'), ((2071, 2080), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2078, 2080), False, 'from torch import nn\n'), ((3077, 3086), 'torch.nn.ReLU', 
'nn.ReLU', ([], {}), '()\n', (3084, 3086), False, 'from torch import nn\n'), ((8926, 8962), 'torch.rand', 'torch.rand', (['(batch_size, state_size)'], {}), '((batch_size, state_size))\n', (8936, 8962), False, 'import torch\n'), ((9170, 9195), 'torch.rand', 'torch.rand', (['batch_size', '(4)'], {}), '(batch_size, 4)\n', (9180, 9195), False, 'import torch\n'), ((9607, 9648), 'torch.argmax', 'torch.argmax', (['next_q'], {'dim': '(1)', 'keepdim': '(True)'}), '(next_q, dim=1, keepdim=True)\n', (9619, 9648), False, 'import torch\n'), ((9672, 9697), 'torch.rand', 'torch.rand', (['batch_size', '(4)'], {}), '(batch_size, 4)\n', (9682, 9697), False, 'import torch\n'), ((10015, 10194), 'bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils.calculate_supervised_classification_quantile_loss', 'calculate_supervised_classification_quantile_loss', (['actions', 'states', 'online_iqn', 'tau_dashes', 'state_embeddings', 'next_state_embeddings', 'is_demos', 'num_actions', '"""cpu"""', '(0.5)', '(0.8)'], {}), "(actions, states,\n online_iqn, tau_dashes, state_embeddings, next_state_embeddings,\n is_demos, num_actions, 'cpu', 0.5, 0.8)\n", (10064, 10194), False, 'from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils import calculate_expert_loss, calculate_supervised_classification_quantile_loss, calculate_huber_loss, evaluate_quantile_at_action, get_margin_loss, update_params\n'), ((10293, 10361), 'bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils.update_params', 'update_params', (['optim', 'loss', '[online_iqn]'], {'retain_graph': '(True)', 'count': 'i'}), '(optim, loss, [online_iqn], retain_graph=True, count=i)\n', (10306, 10361), False, 'from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils import calculate_expert_loss, calculate_supervised_classification_quantile_loss, calculate_huber_loss, evaluate_quantile_at_action, get_margin_loss, update_params\n'), ((5309, 5353), 'bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils.calculate_huber_loss', 'calculate_huber_loss', (['td_errors'], 
{'kappa': 'kappa'}), '(td_errors, kappa=kappa)\n', (5329, 5353), False, 'from bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.utils import calculate_expert_loss, calculate_supervised_classification_quantile_loss, calculate_huber_loss, evaluate_quantile_at_action, get_margin_loss, update_params\n'), ((6069, 6089), 'numpy.zeros', 'np.zeros', (['state_size'], {}), '(state_size)\n', (6077, 6089), True, 'import numpy as np\n'), ((6096, 6116), 'numpy.zeros', 'np.zeros', (['state_size'], {}), '(state_size)\n', (6104, 6116), True, 'import numpy as np\n'), ((7423, 7434), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (7431, 7434), True, 'import numpy as np\n'), ((7441, 7452), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (7449, 7452), True, 'import numpy as np\n'), ((7908, 7928), 'numpy.zeros', 'np.zeros', (['state_size'], {}), '(state_size)\n', (7916, 7928), True, 'import numpy as np\n'), ((7935, 7955), 'numpy.zeros', 'np.zeros', (['state_size'], {}), '(state_size)\n', (7943, 7955), True, 'import numpy as np\n'), ((1417, 1454), 'torch.nn.Linear', 'torch.nn.Linear', (['num_channels', 'hidden'], {}), '(num_channels, hidden)\n', (1432, 1454), False, 'import torch\n'), ((1464, 1502), 'torch.nn.Linear', 'torch.nn.Linear', (['hidden', 'embedding_dim'], {}), '(hidden, embedding_dim)\n', (1479, 1502), False, 'import torch\n'), ((2294, 2384), 'torch.arange', 'torch.arange', ([], {'start': '(1)', 'end': '(self.num_cosines + 1)', 'dtype': 'taus.dtype', 'device': 'taus.device'}), '(start=1, end=self.num_cosines + 1, dtype=taus.dtype, device=\n taus.device)\n', (2306, 2384), False, 'import torch\n')] |
import logging
import numpy as np
import os
import pickle
import scipy.sparse as sp
import sys
import tensorflow as tf
from scipy.sparse import linalg
from datetime import datetime #added
class DataLoader(object):
    """Mini-batch iterator over a pair of aligned sample arrays.

    Optionally pads the data with copies of the final sample so the total
    length becomes a multiple of ``batch_size``, and optionally applies a
    single random permutation at construction time.
    """

    def __init__(self, xs, ys, batch_size, pad_with_last_sample=True, shuffle=False):
        """
        :param xs: input samples, indexable along the first axis.
        :param ys: target samples, aligned with ``xs``.
        :param batch_size: number of samples per batch.
        :param pad_with_last_sample: pad with the last sample to make number
            of samples divisible to batch_size.
        :param shuffle: if True, shuffle the (padded) data once.
        """
        self.batch_size = batch_size
        self.current_ind = 0
        if pad_with_last_sample:
            # Repeat the final sample just enough times to fill the last batch.
            shortfall = (batch_size - len(xs) % batch_size) % batch_size
            xs = np.concatenate([xs, np.repeat(xs[-1:], shortfall, axis=0)], axis=0)
            ys = np.concatenate([ys, np.repeat(ys[-1:], shortfall, axis=0)], axis=0)
        self.size = len(xs)
        self.num_batch = int(self.size // self.batch_size)
        if shuffle:
            order = np.random.permutation(self.size)
            xs, ys = xs[order], ys[order]
        self.xs = xs
        self.ys = ys

    def get_iterator(self):
        """Return a generator yielding ``(x_batch, y_batch)`` tuples in order."""
        self.current_ind = 0

        def _wrapper():
            while self.current_ind < self.num_batch:
                lo = self.batch_size * self.current_ind
                hi = min(self.size, lo + self.batch_size)
                yield self.xs[lo:hi, ...], self.ys[lo:hi, ...]
                self.current_ind += 1

        return _wrapper()
class StandardScaler:
    """
    Standard the input: z = (x - mean) / std, with an exact inverse.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        """Map *data* into zero-mean, unit-variance space."""
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        """Undo :meth:`transform`, recovering the original scale."""
        return data * self.std + self.mean
def add_simple_summary(writer, names, values, global_step):
    """
    Writes summary for a list of scalars.
    :param writer: TF summary writer.
    :param names: tag names, aligned element-wise with ``values``.
    :param values: scalar values to record.
    :param global_step: step index attached to each summary.
    :return:
    """
    for tag, scalar in zip(names, values):
        summary = tf.Summary()
        entry = summary.value.add()
        entry.simple_value = scalar
        entry.tag = tag
        writer.add_summary(summary, global_step)
def calculate_normalized_laplacian(adj):
    """
    Symmetrically normalized Laplacian:
    # L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2
    # D = diag(A 1)
    :param adj: adjacency matrix (dense array or scipy sparse).
    :return: normalized Laplacian as a scipy sparse matrix.
    """
    adj = sp.coo_matrix(adj)
    degrees = np.array(adj.sum(1)).flatten()
    inv_sqrt = np.power(degrees, -0.5)
    # Isolated nodes have degree 0 -> inf after the power; zero them out.
    inv_sqrt[np.isinf(inv_sqrt)] = 0.0
    d_half_inv = sp.diags(inv_sqrt)
    return sp.eye(adj.shape[0]) - adj.dot(d_half_inv).transpose().dot(d_half_inv).tocoo()
def calculate_random_walk_matrix(adj_mx):
    """Return the random-walk transition matrix D^-1 A as a sparse COO matrix."""
    adj_mx = sp.coo_matrix(adj_mx)
    deg = np.array(adj_mx.sum(1)).flatten()
    deg_inv = np.power(deg, -1)
    # Rows with zero out-degree would divide by zero; leave them all-zero.
    deg_inv[np.isinf(deg_inv)] = 0.0
    return sp.diags(deg_inv).dot(adj_mx).tocoo()
def calculate_reverse_random_walk_matrix(adj_mx):
    """Random-walk matrix of the reversed graph (transposed adjacency)."""
    reversed_adj = np.transpose(adj_mx)
    return calculate_random_walk_matrix(reversed_adj)
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
    """Rescale the normalized Laplacian to 2 L / lambda_max - I (float32).

    :param adj_mx: adjacency matrix.
    :param lambda_max: largest eigenvalue; computed via ARPACK when None.
    :param undirected: symmetrize the adjacency by element-wise max first.
    """
    if undirected:
        adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
    laplacian = calculate_normalized_laplacian(adj_mx)
    if lambda_max is None:
        # Largest-magnitude eigenvalue, computed on demand.
        eigvals, _ = linalg.eigsh(laplacian, 1, which='LM')
        lambda_max = eigvals[0]
    laplacian = sp.csr_matrix(laplacian)
    n, _ = laplacian.shape
    identity = sp.identity(n, format='csr', dtype=laplacian.dtype)
    scaled = (2 / lambda_max * laplacian) - identity
    return scaled.astype(np.float32)
def config_logging(log_dir, log_filename='info.log', level=logging.INFO):
    """Configure root logging with a file handler and a stdout handler.

    :param log_dir: directory for the log file; created if missing.
    :param log_filename: name of the log file inside *log_dir*.
    :param level: logging level applied to both handlers.
    """
    file_fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Create the log directory if necessary; tolerate it already existing.
    try:
        os.makedirs(log_dir)
    except OSError:
        pass
    fh = logging.FileHandler(os.path.join(log_dir, log_filename))
    fh.setFormatter(file_fmt)
    fh.setLevel(level=level)
    console_fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler(sys.stdout)
    ch.setFormatter(console_fmt)
    ch.setLevel(level=level)
    logging.basicConfig(handlers=[fh, ch], level=level)
def get_logger(log_dir, name, log_filename='info.log', level=logging.INFO):
    """Create a named logger writing both to a file under *log_dir* and stdout.

    :param log_dir: existing directory that will hold the log file.
    :param name: logger name (see :func:`logging.getLogger`).
    :param log_filename: log file name inside *log_dir*.
    :param level: minimum level for the returned logger.
    :return: the configured :class:`logging.Logger`.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)
    file_fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh = logging.FileHandler(os.path.join(log_dir, log_filename))
    fh.setFormatter(file_fmt)
    console_fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler(sys.stdout)
    ch.setFormatter(console_fmt)
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.info('Log directory: %s', log_dir)
    return logger
def get_total_trainable_parameter_size():
    """
    Calculates the total number of trainable parameters in the current graph.
    :return: total element count summed over all TF1-style trainable variables.
    """
    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension entries; multiply them out.
        # np.prod replaces np.product, which was removed in NumPy 2.0
        # (np.product was a deprecated alias with identical behavior).
        total_parameters += np.prod([x.value for x in variable.get_shape()])
    return total_parameters
def load_dataset(dataset_dir, batch_size, test_batch_size=None, **kwargs):
    """Load train/val/test ``.npz`` splits, normalize channel 0, build loaders.

    :param dataset_dir: directory containing train.npz / val.npz / test.npz,
        each with 'x' and 'y' arrays.
    :param batch_size: batch size for the (shuffled) training loader.
    :param test_batch_size: batch size for the val/test loaders.
    :return: dict with raw splits, the three DataLoaders, and the scaler.
    """
    data = {}
    for split in ('train', 'val', 'test'):
        raw = np.load(os.path.join(dataset_dir, split + '.npz'), allow_pickle=True)
        data['x_' + split] = raw['x']
        data['y_' + split] = raw['y']
    # Scaler statistics come from the training inputs only (channel 0).
    scaler = StandardScaler(data['x_train'][..., 0].mean(), data['x_train'][..., 0].std())
    for split in ('train', 'val', 'test'):
        data['x_' + split][..., 0] = scaler.transform(data['x_' + split][..., 0])
        data['y_' + split][..., 0] = scaler.transform(data['y_' + split][..., 0])
    data['train_loader'] = DataLoader(data['x_train'], data['y_train'], batch_size, shuffle=True)
    data['val_loader'] = DataLoader(data['x_val'], data['y_val'], test_batch_size, shuffle=False)
    data['test_loader'] = DataLoader(data['x_test'], data['y_test'], test_batch_size, shuffle=False)
    data['scaler'] = scaler
    return data
'''
def load_dataset_with_time(dataset_dir, batch_size, test_batch_size=None, **kwargs):
data = {}
for category in ['train', 'val', 'test']:
cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))
data['x_' + category] = cat_data['x']
data['y_' + category] = cat_data['y']
data['time_' + category] = cat_data['time']
scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std())
# Data format
for category in ['train', 'val', 'test']:
data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0])
data['y_' + category][..., 0] = scaler.transform(data['y_' + category][..., 0])
data['train_loader'] = DataLoader(data['x_train'], data['y_train'], data['time_train'], batch_size, shuffle=True)
data['val_loader'] = DataLoader(data['x_val'], data['y_val'], data['time_val'], test_batch_size, shuffle=False)
data['test_loader'] = DataLoader(data['x_test'], data['y_test'], data['time_test'], test_batch_size, shuffle=False)
data['scaler'] = scaler
return data
'''
def load_graph_data(pkl_filename):
    """Unpack ``(sensor_ids, sensor_id_to_ind, adj_mx)`` from a pickle file."""
    # load_pickle already returns the 3-tuple in this exact order.
    return load_pickle(pkl_filename)
def load_pickle(pickle_file):
    """Load a pickle file, retrying with latin1 for Python-2 era pickles.

    :param pickle_file: path to the pickle file.
    :return: the unpickled object.
    :raises: re-raises any failure after printing a diagnostic.
    """
    try:
        with open(pickle_file, 'rb') as f:
            return pickle.load(f)
    except UnicodeDecodeError:
        # Pickles written by Python 2 need a byte-compatible encoding.
        with open(pickle_file, 'rb') as f:
            return pickle.load(f, encoding='latin1')
    except Exception as e:
        print('Unable to load data ', pickle_file, ':', e)
        raise
| [
"tensorflow.trainable_variables",
"logging.Formatter",
"pickle.load",
"numpy.maximum.reduce",
"os.path.join",
"scipy.sparse.eye",
"tensorflow.Summary",
"numpy.power",
"numpy.transpose",
"scipy.sparse.coo_matrix",
"scipy.sparse.identity",
"numpy.repeat",
"scipy.sparse.diags",
"logging.Strea... | [((2704, 2722), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (2717, 2722), True, 'import scipy.sparse as sp\n'), ((2864, 2884), 'scipy.sparse.diags', 'sp.diags', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (2872, 2884), True, 'import scipy.sparse as sp\n'), ((3094, 3115), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj_mx'], {}), '(adj_mx)\n', (3107, 3115), True, 'import scipy.sparse as sp\n'), ((3238, 3253), 'scipy.sparse.diags', 'sp.diags', (['d_inv'], {}), '(d_inv)\n', (3246, 3253), True, 'import scipy.sparse as sp\n'), ((3780, 3796), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['L'], {}), '(L)\n', (3793, 3796), True, 'import scipy.sparse as sp\n'), ((3826, 3869), 'scipy.sparse.identity', 'sp.identity', (['M'], {'format': '"""csr"""', 'dtype': 'L.dtype'}), "(M, format='csr', dtype=L.dtype)\n", (3837, 3869), True, 'import scipy.sparse as sp\n'), ((4076, 4149), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (4093, 4149), False, 'import logging\n'), ((4483, 4545), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (4500, 4545), False, 'import logging\n'), ((4569, 4602), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (4590, 4602), False, 'import logging\n'), ((4704, 4778), 'logging.basicConfig', 'logging.basicConfig', ([], {'handlers': '[file_handler, console_handler]', 'level': 'level'}), '(handlers=[file_handler, console_handler], level=level)\n', (4723, 4778), False, 'import logging\n'), ((4874, 4897), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (4891, 4897), False, 'import logging\n'), ((4986, 5059), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - 
%(name)s - %(levelname)s - %(message)s')\n", (5003, 5059), False, 'import logging\n'), ((5232, 5294), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (5249, 5294), False, 'import logging\n'), ((5318, 5351), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (5339, 5351), False, 'import logging\n'), ((5789, 5813), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (5811, 5813), True, 'import tensorflow as tf\n'), ((2338, 2350), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (2348, 2350), True, 'import tensorflow as tf\n'), ((2815, 2835), 'numpy.isinf', 'np.isinf', (['d_inv_sqrt'], {}), '(d_inv_sqrt)\n', (2823, 2835), True, 'import numpy as np\n'), ((2913, 2933), 'scipy.sparse.eye', 'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (2919, 2933), True, 'import scipy.sparse as sp\n'), ((3199, 3214), 'numpy.isinf', 'np.isinf', (['d_inv'], {}), '(d_inv)\n', (3207, 3214), True, 'import numpy as np\n'), ((3429, 3449), 'numpy.transpose', 'np.transpose', (['adj_mx'], {}), '(adj_mx)\n', (3441, 3449), True, 'import numpy as np\n'), ((3565, 3602), 'numpy.maximum.reduce', 'np.maximum.reduce', (['[adj_mx, adj_mx.T]'], {}), '([adj_mx, adj_mx.T])\n', (3582, 3602), True, 'import numpy as np\n'), ((3704, 3734), 'scipy.sparse.linalg.eigsh', 'linalg.eigsh', (['L', '(1)'], {'which': '"""LM"""'}), "(L, 1, which='LM')\n", (3716, 3734), False, 'from scipy.sparse import linalg\n'), ((4215, 4235), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (4226, 4235), False, 'import os\n'), ((4311, 4346), 'os.path.join', 'os.path.join', (['log_dir', 'log_filename'], {}), '(log_dir, log_filename)\n', (4323, 4346), False, 'import os\n'), ((5100, 5135), 'os.path.join', 'os.path.join', (['log_dir', 'log_filename'], {}), '(log_dir, log_filename)\n', (5112, 5135), False, 'import os\n'), ((732, 771), 
'numpy.repeat', 'np.repeat', (['xs[-1:]', 'num_padding'], {'axis': '(0)'}), '(xs[-1:], num_padding, axis=0)\n', (741, 771), True, 'import numpy as np\n'), ((797, 836), 'numpy.repeat', 'np.repeat', (['ys[-1:]', 'num_padding'], {'axis': '(0)'}), '(ys[-1:], num_padding, axis=0)\n', (806, 836), True, 'import numpy as np\n'), ((855, 894), 'numpy.concatenate', 'np.concatenate', (['[xs, x_padding]'], {'axis': '(0)'}), '([xs, x_padding], axis=0)\n', (869, 894), True, 'import numpy as np\n'), ((913, 952), 'numpy.concatenate', 'np.concatenate', (['[ys, y_padding]'], {'axis': '(0)'}), '([ys, y_padding], axis=0)\n', (927, 952), True, 'import numpy as np\n'), ((1090, 1122), 'numpy.random.permutation', 'np.random.permutation', (['self.size'], {}), '(self.size)\n', (1111, 1122), True, 'import numpy as np\n'), ((2771, 2788), 'numpy.power', 'np.power', (['d', '(-0.5)'], {}), '(d, -0.5)\n', (2779, 2788), True, 'import numpy as np\n'), ((3162, 3177), 'numpy.power', 'np.power', (['d', '(-1)'], {}), '(d, -1)\n', (3170, 3177), True, 'import numpy as np\n'), ((6140, 6184), 'os.path.join', 'os.path.join', (['dataset_dir', "(category + '.npz')"], {}), "(dataset_dir, category + '.npz')\n", (6152, 6184), False, 'import os\n'), ((8482, 8496), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8493, 8496), False, 'import pickle\n'), ((8605, 8638), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (8616, 8638), False, 'import pickle\n')] |
"""
Interpolate frames from two frames using SuperSloMo version
"""
import argparse
from time import time
import os
import click
import cv2
import torch
from PIL import Image
import numpy as np
import model
from torchvision import transforms
from torch.functional import F
# Inference-only script: disable autograd bookkeeping globally.
torch.set_grad_enabled(False)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
trans_forward = transforms.ToTensor()
trans_backward = transforms.ToPILImage()
# NOTE(review): `device` is a torch.device object compared against the
# string "cpu"; torch.device supports string equality, so on a CPU-only
# machine this branch is skipped and no normalization is applied —
# confirm that asymmetry is intended.
if device != "cpu":
    # Channel-wise normalization expected by the pretrained weights;
    # mea0 is the negated mean used to invert the transform.
    mean = [0.429, 0.431, 0.397]
    mea0 = [-m for m in mean]
    std = [1] * 3
    trans_forward = transforms.Compose([trans_forward, transforms.Normalize(mean=mean, std=std)])
    trans_backward = transforms.Compose([transforms.Normalize(mean=mea0, std=std), trans_backward])
# Optical-flow network: 6 input channels (two stacked RGB frames).
flow = model.UNet(6, 4).to(device)
# Arbitrary-time interpolation network: 20 input channels.
interp = model.UNet(20, 5).to(device)
# Backward-warping module; built lazily by setup_back_warp() once the
# working frame size is known.
back_warp = None
def setup_back_warp(w, h):
    """(Re)build the global backward-warping module for a ``w`` x ``h`` grid."""
    global back_warp
    with torch.set_grad_enabled(False):
        back_warp = model.backWarp(w, h, device).to(device)
def load_models(checkpoint):
    """Populate the global flow and interpolation networks from a checkpoint.

    :param checkpoint: path to a .ckpt file holding 'state_dictFC' (flow)
        and 'state_dictAT' (interpolation) state dicts.
    """
    states = torch.load(checkpoint, map_location='cpu')
    flow.load_state_dict(states['state_dictFC'])
    interp.load_state_dict(states['state_dictAT'])
def interpolate_batch(frames, factor):
    """Synthesize ``factor - 1`` intermediate frames for each consecutive pair.

    :param frames: list of frame tensors; pairs (frames[i], frames[i+1]) are
        interpolated in a single batched pass.
    :param factor: upsampling factor; factor - 1 time steps are produced.
    :return: list of tensors, one per intermediate time step t = i / factor.
    """
    frame0 = torch.stack(frames[:-1])
    frame1 = torch.stack(frames[1:])
    i0 = frame0.to(device)
    i1 = frame1.to(device)
    ix = torch.cat([i0, i1], dim=1)

    # Bidirectional optical flow between each frame pair.
    flow_out = flow(ix)
    f01 = flow_out[:, :2, :, :]
    f10 = flow_out[:, 2:, :, :]

    frame_buffer = []
    for i in range(1, factor):
        t = i / factor
        temp = -t * (1 - t)
        co_eff = [temp, t * t, (1 - t) * (1 - t), temp]

        # Approximate intermediate flows at time t from the pair flows.
        ft0 = co_eff[0] * f01 + co_eff[1] * f10
        ft1 = co_eff[2] * f01 + co_eff[3] * f10

        gi0ft0 = back_warp(i0, ft0)
        gi1ft1 = back_warp(i1, ft1)

        iy = torch.cat((i0, i1, f01, f10, ft1, ft0, gi1ft1, gi0ft0), dim=1)
        io = interp(iy)

        # Refined flows plus visibility map from the interpolation net.
        ft0f = io[:, :2, :, :] + ft0
        ft1f = io[:, 2:4, :, :] + ft1
        # torch.sigmoid replaces F.sigmoid, which is deprecated/removed in
        # modern PyTorch; the computation is identical.
        vt0 = torch.sigmoid(io[:, 4:5, :, :])
        vt1 = 1 - vt0

        gi0ft0f = back_warp(i0, ft0f)
        gi1ft1f = back_warp(i1, ft1f)

        co_eff = [1 - t, t]

        # Visibility-weighted blend of the two warped frames.
        ft_p = (co_eff[0] * vt0 * gi0ft0f + co_eff[1] * vt1 * gi1ft1f) / \
               (co_eff[0] * vt0 + co_eff[1] * vt1)

        frame_buffer.append(ft_p)

    return frame_buffer
def load_batch(first_frame, second_frame, w, h):
    """Convert two BGR frames into normalized tensors resized to ``(w, h)``.

    :param first_frame: BGR image array (OpenCV convention).
    :param second_frame: BGR image array.
    :param w: target width (multiple of 32 for the UNet).
    :param h: target height.
    :return: list of two tensors produced by the global ``trans_forward``.
    """
    batch = []
    for frame in (first_frame, second_frame):
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = Image.fromarray(frame)
        # Image.LANCZOS is the same filter as Image.ANTIALIAS, which was a
        # deprecated alias removed in Pillow 10.
        frame = frame.resize((w, h), Image.LANCZOS)
        frame = frame.convert('RGB')
        frame = trans_forward(frame)
        batch.append(frame)
    return batch
def denorm_frame(frame, w0, h0):
    """Map a network-output tensor back to a BGR uint8 image of size (w0, h0)."""
    pil_img = trans_backward(frame.cpu())
    pil_img = pil_img.resize((w0, h0), Image.BILINEAR)
    pil_img = pil_img.convert('RGB')
    # RGB -> BGR channel flip for OpenCV; copy() makes the array contiguous.
    return np.array(pil_img)[:, :, ::-1].copy()
def save_inter_frames(first_frame, second_frame, factor, dest):
    """Interpolate between two frame images on disk and save the results.

    :param first_frame: path of the earlier frame image.
    :param second_frame: path of the later frame image.
    :param factor: interpolation factor; factor - 1 frames are produced.
    :param dest: output directory ('' writes to the current directory).
    """
    first_frame = cv2.imread(first_frame)
    second_frame = cv2.imread(second_frame)
    h0 = first_frame.shape[0]
    w0 = first_frame.shape[1]
    # Network input dimensions must be multiples of 32 (UNet downsampling).
    w, h = (w0 // 32) * 32, (h0 // 32) * 32
    setup_back_warp(w, h)
    batch = load_batch(first_frame, second_frame, w, h)
    intermediate_frames = interpolate_batch(batch, factor)
    # BUG FIX: the original ignored `dest` and reused '%d.jpg' % fid for
    # every frame inside a batch, overwriting earlier outputs. Each saved
    # frame now gets a unique index and lands inside `dest`.
    count = 0
    for iframe in intermediate_frames:
        for frm in iframe:
            out = denorm_frame(frm, w0, h0)
            cv2.imwrite(os.path.join(dest, '%d.jpg' % count), out)
            count += 1
def check_path(path):
    """Create *path* (including missing parents) if it does not already exist.

    os.makedirs(..., exist_ok=True) avoids the check-then-create race of the
    original os.path.exists + os.mkdir pair and also creates parent
    directories, while remaining a no-op for an existing directory.
    """
    os.makedirs(path, exist_ok=True)
'''
@click.command('Evaluate Model by converting a low-FPS video to high-fps')
@click.argument('input')
@click.option('--checkpoint', help='Path to model checkpoint')
@click.option('--output', help='Path to output file to save')
@click.option('--first_frame', default=2, help='path')
@click.option('--second_frame', default=30, help='path')
@click.option('--scale', default=4, help='Scale Factor of FPS')
'''
def main(checkpoint, first_frame, second_frame, scale, output):
    """Load the networks and write ``scale - 1`` interpolated frames.

    :param checkpoint: path to the SuperSloMo checkpoint file.
    :param first_frame: path of the earlier frame image.
    :param second_frame: path of the later frame image.
    :param scale: interpolation factor.
    :param output: output directory passed through to save_inter_frames.
    """
    load_models(checkpoint)
    save_inter_frames(first_frame, second_frame, scale, output)
if __name__ == '__main__':
    # Command-line entry point (the click-based CLI above is commented out).
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint', type = str, default = 'SuperSloMo.ckpt', help = 'root for model')
    parser.add_argument('--first_frame', type = str, default = './DAVIS_frame\\bear\\00000.jpg', help = 'first frame')
    parser.add_argument('--second_frame', type = str, default = './DAVIS_frame\\bear\\00001.jpg', help = 'second frame')
    parser.add_argument('--scale', type = int, default = 4, help = 'interpolate num')
    parser.add_argument('--output', type = str, default = '', help = 'output dir')
    opt = parser.parse_args()
    main(checkpoint = opt.checkpoint, \
         first_frame = opt.first_frame, \
         second_frame = opt.second_frame, \
         scale = opt.scale, output = opt.output)
| [
"os.mkdir",
"argparse.ArgumentParser",
"torch.cat",
"torchvision.transforms.Normalize",
"cv2.cvtColor",
"cv2.imwrite",
"torch.load",
"torchvision.transforms.ToPILImage",
"os.path.exists",
"torch.cuda.is_available",
"torch.set_grad_enabled",
"torch.functional.F.sigmoid",
"torch.stack",
"mod... | [((275, 304), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (297, 304), False, 'import torch\n'), ((392, 413), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (411, 413), False, 'from torchvision import transforms\n'), ((431, 454), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (452, 454), False, 'from torchvision import transforms\n'), ((1039, 1081), 'torch.load', 'torch.load', (['checkpoint'], {'map_location': '"""cpu"""'}), "(checkpoint, map_location='cpu')\n", (1049, 1081), False, 'import torch\n'), ((1237, 1261), 'torch.stack', 'torch.stack', (['frames[:-1]'], {}), '(frames[:-1])\n', (1248, 1261), False, 'import torch\n'), ((1275, 1298), 'torch.stack', 'torch.stack', (['frames[1:]'], {}), '(frames[1:])\n', (1286, 1298), False, 'import torch\n'), ((1363, 1389), 'torch.cat', 'torch.cat', (['[i0, i1]'], {'dim': '(1)'}), '([i0, i1], dim=1)\n', (1372, 1389), False, 'import torch\n'), ((3033, 3056), 'cv2.imread', 'cv2.imread', (['first_frame'], {}), '(first_frame)\n', (3043, 3056), False, 'import cv2\n'), ((3076, 3100), 'cv2.imread', 'cv2.imread', (['second_frame'], {}), '(second_frame)\n', (3086, 3100), False, 'import cv2\n'), ((4299, 4324), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4322, 4324), False, 'import argparse\n'), ((337, 362), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (360, 362), False, 'import torch\n'), ((762, 778), 'model.UNet', 'model.UNet', (['(6)', '(4)'], {}), '(6, 4)\n', (772, 778), False, 'import model\n'), ((799, 816), 'model.UNet', 'model.UNet', (['(20)', '(5)'], {}), '(20, 5)\n', (809, 816), False, 'import model\n'), ((904, 933), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (926, 933), False, 'import torch\n'), ((1824, 1886), 'torch.cat', 'torch.cat', (['(i0, i1, f01, f10, ft1, ft0, gi1ft1, gi0ft0)'], {'dim': '(1)'}), '((i0, i1, f01, f10, 
ft1, ft0, gi1ft1, gi0ft0), dim=1)\n', (1833, 1886), False, 'import torch\n'), ((2001, 2028), 'torch.functional.F.sigmoid', 'F.sigmoid', (['io[:, 4:5, :, :]'], {}), '(io[:, 4:5, :, :])\n', (2010, 2028), False, 'from torch.functional import F\n'), ((2473, 2511), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2485, 2511), False, 'import cv2\n'), ((2528, 2550), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (2543, 2550), False, 'from PIL import Image\n'), ((3642, 3662), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3656, 3662), False, 'import os\n'), ((3672, 3686), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (3680, 3686), False, 'import os\n'), ((611, 651), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (631, 651), False, 'from torchvision import transforms\n'), ((695, 735), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mea0', 'std': 'std'}), '(mean=mea0, std=std)\n', (715, 735), False, 'from torchvision import transforms\n'), ((3577, 3607), 'cv2.imwrite', 'cv2.imwrite', (["('%d.jpg' % fid)", 'f'], {}), "('%d.jpg' % fid, f)\n", (3588, 3607), False, 'import cv2\n'), ((955, 983), 'model.backWarp', 'model.backWarp', (['w', 'h', 'device'], {}), '(w, h, device)\n', (969, 983), False, 'import model\n'), ((2913, 2928), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (2921, 2928), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import serial
import time
# Line-following robot controller: reads webcam frames, segments the track
# and colored marker patches in HSV space, and sends single-character
# steering commands ('l'/'r'/'f') over a serial link to the drive board.
ser = serial.Serial('/dev/ttyACM0', baudrate = 9600, timeout = 1)
cap = cv2.VideoCapture(0)
cap.set(3,1280)  # 3 == CAP_PROP_FRAME_WIDTH
cap.set(4,720)   # 4 == CAP_PROP_FRAME_HEIGHT
# HSV bounds for the track mask and for the three marker colors.
path_lower = np.array([0,80,0])
path_upper = np.array([179,255,255])
green_upper = np.array([88,162,154]) # Green : switch to right track
green_lower = np.array([68,142,74])
violet_upper = np.array([140,140,150]) # Violet : switch to left track
violet_lower = np.array([120,105,95])
pink_upper = np.array([179,120,255]) # Pink : move to center of board and keep going till a line is reacquired
pink_lower = np.array([150,53,150])
font = cv2.FONT_HERSHEY_COMPLEX
kernel = np.ones((5,5),np.uint8)
# Which track we are currently on: 1 = left, 2 = right (used when the
# path is lost to pick a search direction).
path_num = 1
while True:
    ret, frame = cap.read()
    if not ret:
        # Camera read failed; reopen the device and retry next iteration.
        cap = cv2.VideoCapture(0)
        continue
    (h, w) = frame.shape[:2]
    blur = cv2.GaussianBlur(frame,(5,5),cv2.BORDER_DEFAULT)
    hsvvid = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    # Binary masks for the track and each marker color.
    path_mask = cv2.inRange(hsvvid, path_lower, path_upper)
    green_mask = cv2.inRange(hsvvid, green_lower, green_upper)
    violet_mask = cv2.inRange(hsvvid, violet_lower, violet_upper)
    pink_mask = cv2.inRange(hsvvid, pink_lower, pink_upper)
    #cv2.imshow('Green mask', green_mask)
    # cv2.imshow('Violet mask', violet_mask)
    # cv2.imshow('Pink mask', pink_mask)
    green_contours, hierarchy = cv2.findContours(green_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    violet_contours, hierarchy = cv2.findContours(violet_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    pink_contours, hierarchy = cv2.findContours(pink_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Clean the track mask: open to remove speckles, erode, then dilate
    # heavily to merge the remaining blobs into one solid region.
    opening = cv2.morphologyEx(path_mask, cv2.MORPH_OPEN, kernel)
    erosion = cv2.erode(opening,kernel,iterations = 1)
    dilation = cv2.dilate(erosion,kernel,iterations = 5)
    path_contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Marker handling: a large enough marker (> 5000 px^2 bounding box)
    # terminates this control loop; the corresponding serial commands are
    # not implemented yet (see "serial monitor instructions" placeholders).
    if len(green_contours) > 0:
        largest_green = max(green_contours, key = cv2.contourArea)
        x_green, y_green, w_green, h_green = cv2.boundingRect(largest_green)
        if w_green*h_green > 5000:
            path_num = 2
            cv2.putText(frame, 'Switching tracks ...', (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            # serial monitor instructions go here
            break
    elif len(violet_contours) > 0:
        largest_violet = max(violet_contours, key = cv2.contourArea)
        x_violet, y_violet, w_violet, h_violet = cv2.boundingRect(largest_violet)
        if w_violet*h_violet > 5000:
            path_num = 1
            cv2.putText(frame, 'Switching tracks ...', (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            # serial monitor instructions go here
            break
    elif len(pink_contours) > 0:
        largest_pink = max(pink_contours, key = cv2.contourArea)
        x_pink, y_pink, w_pink, h_pink = cv2.boundingRect(largest_pink)
        if w_pink*h_pink > 5000:
            cv2.putText(frame, 'Moving across center', (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            # serial monitor instructions go here
            break
    # Steering: keep the largest track blob's centroid inside a 300 px
    # dead band around the image center line.
    if len(path_contours) > 0:
        largest = max(path_contours, key = cv2.contourArea)
        M_1 = cv2.moments(largest)
        path_centroid_x = int(M_1['m10']/M_1['m00'])
        path_centroid_y = int(M_1['m01'] / M_1['m00'])
        if path_centroid_x < w/2 - 150:
            i = 'l'
            ser.write(i.encode())
            print('go left')
            left_text = 'Go left'
            cv2.putText(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(0.05)
        elif path_centroid_x > w/2 + 150:
            i = 'r'
            ser.write(i.encode())
            print('go right')
            right_text = 'Go right'
            cv2.putText(frame, right_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(0.05)
        else:
            i = 'f'
            ser.write(i.encode())
            print('go straight')
            straight_text = 'Go straight'
            cv2.putText(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            time.sleep(0.110)
    else:
        # Track lost: turn toward the side of the current track to search.
        if path_num == 1 :
            i = 'l'
        elif path_num == 2 :
            i = 'r'
        ser.write(i.encode())
        print('looking for path')
        straight_text = 'looking for path'
        cv2.putText(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
        time.sleep(0.05)
    cv2.imshow('path video', frame)
    # NOTE(review): cv2.waitKey is called twice per frame; the second call
    # can consume a key press the first already returned — consider reusing
    # `key` in the quit test below.
    key = cv2.waitKey(1)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| [
"serial.Serial",
"cv2.GaussianBlur",
"cv2.boundingRect",
"cv2.putText",
"cv2.dilate",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.waitKey",
"cv2.moments",
"cv2.imshow",
"numpy.ones",
"time.sleep",
"cv2.VideoCapture",
"numpy.array",
"cv2.erode",
"cv2.destroyAllWindows",
"cv2.inRange",
... | [((68, 123), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyACM0"""'], {'baudrate': '(9600)', 'timeout': '(1)'}), "('/dev/ttyACM0', baudrate=9600, timeout=1)\n", (81, 123), False, 'import serial\n'), ((137, 156), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (153, 156), False, 'import cv2\n'), ((206, 226), 'numpy.array', 'np.array', (['[0, 80, 0]'], {}), '([0, 80, 0])\n', (214, 226), True, 'import numpy as np\n'), ((239, 264), 'numpy.array', 'np.array', (['[179, 255, 255]'], {}), '([179, 255, 255])\n', (247, 264), True, 'import numpy as np\n'), ((280, 304), 'numpy.array', 'np.array', (['[88, 162, 154]'], {}), '([88, 162, 154])\n', (288, 304), True, 'import numpy as np\n'), ((350, 373), 'numpy.array', 'np.array', (['[68, 142, 74]'], {}), '([68, 142, 74])\n', (358, 373), True, 'import numpy as np\n'), ((390, 415), 'numpy.array', 'np.array', (['[140, 140, 150]'], {}), '([140, 140, 150])\n', (398, 415), True, 'import numpy as np\n'), ((462, 486), 'numpy.array', 'np.array', (['[120, 105, 95]'], {}), '([120, 105, 95])\n', (470, 486), True, 'import numpy as np\n'), ((501, 526), 'numpy.array', 'np.array', (['[179, 120, 255]'], {}), '([179, 120, 255])\n', (509, 526), True, 'import numpy as np\n'), ((613, 637), 'numpy.array', 'np.array', (['[150, 53, 150]'], {}), '([150, 53, 150])\n', (621, 637), True, 'import numpy as np\n'), ((681, 706), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (688, 706), True, 'import numpy as np\n'), ((4772, 4795), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4793, 4795), False, 'import cv2\n'), ((877, 928), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['frame', '(5, 5)', 'cv2.BORDER_DEFAULT'], {}), '(frame, (5, 5), cv2.BORDER_DEFAULT)\n', (893, 928), False, 'import cv2\n'), ((940, 977), 'cv2.cvtColor', 'cv2.cvtColor', (['blur', 'cv2.COLOR_BGR2HSV'], {}), '(blur, cv2.COLOR_BGR2HSV)\n', (952, 977), False, 'import cv2\n'), ((997, 1040), 'cv2.inRange', 'cv2.inRange', 
(['hsvvid', 'path_lower', 'path_upper'], {}), '(hsvvid, path_lower, path_upper)\n', (1008, 1040), False, 'import cv2\n'), ((1059, 1104), 'cv2.inRange', 'cv2.inRange', (['hsvvid', 'green_lower', 'green_upper'], {}), '(hsvvid, green_lower, green_upper)\n', (1070, 1104), False, 'import cv2\n'), ((1124, 1171), 'cv2.inRange', 'cv2.inRange', (['hsvvid', 'violet_lower', 'violet_upper'], {}), '(hsvvid, violet_lower, violet_upper)\n', (1135, 1171), False, 'import cv2\n'), ((1189, 1232), 'cv2.inRange', 'cv2.inRange', (['hsvvid', 'pink_lower', 'pink_upper'], {}), '(hsvvid, pink_lower, pink_upper)\n', (1200, 1232), False, 'import cv2\n'), ((1401, 1473), 'cv2.findContours', 'cv2.findContours', (['green_mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(green_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1417, 1473), False, 'import cv2\n'), ((1508, 1581), 'cv2.findContours', 'cv2.findContours', (['violet_mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(violet_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1524, 1581), False, 'import cv2\n'), ((1614, 1685), 'cv2.findContours', 'cv2.findContours', (['pink_mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(pink_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (1630, 1685), False, 'import cv2\n'), ((1703, 1754), 'cv2.morphologyEx', 'cv2.morphologyEx', (['path_mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(path_mask, cv2.MORPH_OPEN, kernel)\n', (1719, 1754), False, 'import cv2\n'), ((1770, 1810), 'cv2.erode', 'cv2.erode', (['opening', 'kernel'], {'iterations': '(1)'}), '(opening, kernel, iterations=1)\n', (1779, 1810), False, 'import cv2\n'), ((1827, 1868), 'cv2.dilate', 'cv2.dilate', (['erosion', 'kernel'], {'iterations': '(5)'}), '(erosion, kernel, iterations=5)\n', (1837, 1868), False, 'import cv2\n'), ((1901, 1971), 'cv2.findContours', 'cv2.findContours', (['dilation', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(dilation, cv2.RETR_EXTERNAL, 
cv2.CHAIN_APPROX_SIMPLE)\n', (1917, 1971), False, 'import cv2\n'), ((4638, 4669), 'cv2.imshow', 'cv2.imshow', (['"""path video"""', 'frame'], {}), "('path video', frame)\n", (4648, 4669), False, 'import cv2\n'), ((4681, 4695), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4692, 4695), False, 'import cv2\n'), ((797, 816), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (813, 816), False, 'import cv2\n'), ((2121, 2152), 'cv2.boundingRect', 'cv2.boundingRect', (['largest_green'], {}), '(largest_green)\n', (2137, 2152), False, 'import cv2\n'), ((3308, 3328), 'cv2.moments', 'cv2.moments', (['largest'], {}), '(largest)\n', (3319, 3328), False, 'import cv2\n'), ((4524, 4609), 'cv2.putText', 'cv2.putText', (['frame', 'straight_text', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA\n )\n', (4535, 4609), False, 'import cv2\n'), ((4614, 4630), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (4624, 4630), False, 'import time\n'), ((2228, 2321), 'cv2.putText', 'cv2.putText', (['frame', '"""Switching tracks ..."""', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), "(frame, 'Switching tracks ...', (5, 50), font, 2, (0, 0, 255), 2,\n cv2.LINE_AA)\n", (2239, 2321), False, 'import cv2\n'), ((2546, 2578), 'cv2.boundingRect', 'cv2.boundingRect', (['largest_violet'], {}), '(largest_violet)\n', (2562, 2578), False, 'import cv2\n'), ((3616, 3692), 'cv2.putText', 'cv2.putText', (['frame', 'left_text', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, left_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\n', (3627, 3692), False, 'import cv2\n'), ((3706, 3722), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (3716, 3722), False, 'import time\n'), ((4704, 4718), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4715, 4718), False, 'import cv2\n'), ((2656, 2749), 'cv2.putText', 'cv2.putText', (['frame', 
'"""Switching tracks ..."""', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), "(frame, 'Switching tracks ...', (5, 50), font, 2, (0, 0, 255), 2,\n cv2.LINE_AA)\n", (2667, 2749), False, 'import cv2\n'), ((2960, 2990), 'cv2.boundingRect', 'cv2.boundingRect', (['largest_pink'], {}), '(largest_pink)\n', (2976, 2990), False, 'import cv2\n'), ((3905, 3982), 'cv2.putText', 'cv2.putText', (['frame', 'right_text', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, right_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\n', (3916, 3982), False, 'import cv2\n'), ((3996, 4012), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (4006, 4012), False, 'import time\n'), ((4176, 4261), 'cv2.putText', 'cv2.putText', (['frame', 'straight_text', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(frame, straight_text, (5, 50), font, 2, (0, 0, 255), 2, cv2.LINE_AA\n )\n', (4187, 4261), False, 'import cv2\n'), ((4270, 4286), 'time.sleep', 'time.sleep', (['(0.11)'], {}), '(0.11)\n', (4280, 4286), False, 'import time\n'), ((3038, 3131), 'cv2.putText', 'cv2.putText', (['frame', '"""Moving across center"""', '(5, 50)', 'font', '(2)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), "(frame, 'Moving across center', (5, 50), font, 2, (0, 0, 255), 2,\n cv2.LINE_AA)\n", (3049, 3131), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
from torch import nn
from torch.utils.data import DataLoader, Dataset, TensorDataset
from sklearn.metrics import accuracy_score
from sklearn.base import BaseEstimator
class StackedAutoEncoderClassifier(BaseEstimator):
    """Scikit-learn style classifier wrapping a stacked auto-encoder (SAE).

    Workflow: unsupervised pretraining (MSE reconstruction loss over all
    samples) followed by supervised finetuning (cross-entropy over labeled
    samples). Samples labeled -1 are treated as unlabeled.
    """

    def __init__(self, SAE, pretrain_epochs=100, finetune_epochs=100,
                 pretrain_optimizer_parameters=None,
                 finetune_optimizer_parameters=None,
                 pretrain_batch_size=256, finetune_batch_size=256, pretrain_optimizer=None,
                 finetune_optimizer=None, patience=40,
                 device_name="auto", verbose=1, save_pretrain_model=False):
        """
        :param SAE: torch module; forward returns a pair (see fit/predict usage).
        :param pretrain_optimizer_parameters: kwargs for the default Adam
            pretrain optimizer; None means dict(lr=0.003, weight_decay=1e-5).
        :param finetune_optimizer_parameters: kwargs for the default Adam
            finetune optimizer; None means dict(lr=0.003).
        :param patience: early-stopping patience, in epochs.
        :param device_name: 'auto' selects cuda when available, else cpu.
        """
        # BUG FIX: the optimizer-parameter defaults were mutable dicts shared
        # across all instances; build them per instance instead.
        if pretrain_optimizer_parameters is None:
            pretrain_optimizer_parameters = dict(lr=0.003, weight_decay=1e-5)
        if finetune_optimizer_parameters is None:
            finetune_optimizer_parameters = dict(lr=0.003)
        self._estimator_type = "classifier"
        self.classes_ = None
        if device_name == 'auto' or not device_name:
            if torch.cuda.is_available():
                device_name = 'cuda'
            else:
                device_name = 'cpu'
        self.device = torch.device(device_name)
        if verbose:
            print("Info:", f"Device used : {self.device}")
        self.SAE = SAE.to(self.device)
        self.pretrain_epochs = pretrain_epochs
        self.finetune_epochs = finetune_epochs
        self.pretrain_batch_size = pretrain_batch_size
        self.finetune_batch_size = finetune_batch_size
        if pretrain_optimizer is None:
            self.pretrain_optimizer = torch.optim.Adam(SAE.parameters(), **pretrain_optimizer_parameters)
        else:
            self.pretrain_optimizer = pretrain_optimizer
        if finetune_optimizer is None:
            # Finetune only the parameters that still require gradients.
            self.finetune_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, SAE.parameters()),
                                                   **finetune_optimizer_parameters)
        else:
            self.finetune_optimizer = finetune_optimizer
        self.patience = patience
        self.verbose = verbose
        self.save_pretrain_model = save_pretrain_model
        self.pretrain_loss = nn.MSELoss()
        self.finetune_loss = nn.CrossEntropyLoss()
def create_dataloader(self, X, y=None, batch_size=256, shuffle=False, device="cuda"):
"""
Return: DataLoader of tensor data.
"""
X = torch.tensor(X.values if isinstance(X, pd.DataFrame) else X, dtype=torch.float).to(self.device)
if y is not None:
y = torch.tensor(y, dtype=torch.float).to(self.device)
tensor_data = TensorDataset(X, y)
else:
tensor_data = TensorDataset(X)
dataloader = DataLoader(tensor_data, batch_size=batch_size, shuffle=shuffle)
return dataloader
def onehot(self, narr, nclass=None):
"""
:param narr: np.ndarray
return onehot ndarray.
"""
if not nclass:
nclass = np.max(narr) + 1
return np.eye(nclass)[narr]
    def save_model(self, ):
        """Save the pretrained SAE to 'SAE_pretrain.pth' in the current
        working directory and return the checkpoint file name.

        The checkpoint stores the model object, its state_dict and the
        pretrain optimizer state so pretraining can be resumed later.
        """
        checkpoint = {'model': self.SAE,
                      'state_dict': self.SAE.state_dict(),
                      'pretrain_optimizer': self.pretrain_optimizer.state_dict()}
        ckpt_file = 'SAE_pretrain.pth'
        torch.save(checkpoint, ckpt_file)
        return ckpt_file
    def fit(self, X, y, is_pretrain=True, validation_data=None):
        """Pretrain the SAE on all samples, then finetune on labeled ones.

        :param X: feature table. NOTE(review): despite the original docstring
            saying np.ndarray, the code uses ``X[...].values`` — it appears to
            expect a pandas DataFrame; confirm against callers.
        :param y: 1-dim labels; value == -1 marks an unlabeled sample.
            NOTE(review): ``y[y != -1].unique()`` implies a pandas Series.
        :param is_pretrain: run the unsupervised pretraining stage first.
        :param validation_data: optional (X_valid, y_valid) used for
            early stopping during finetuning.
        :return: the trained SAE module.
        """
        # process data
        # NOTE(review): unlabeledX is computed but never used below.
        unlabeledX = X[y == -1].values
        labeledX = X[y != -1].values
        nclass = np.max(y) + 1
        self.classes_ = sorted(y[y != -1].unique())
        labeled_y = y[y != -1]
        if labeled_y.ndim == 1:
            # Convert scalar labels to one-hot for CrossEntropyLoss via argmax.
            labeled_y = self.onehot(labeled_y)
        if is_pretrain:
            # step 1. pretrain: reconstruct all samples (labeled + unlabeled).
            train_loader = self.create_dataloader(X, batch_size=self.pretrain_batch_size, shuffle=True)
            min_loss = float('inf')
            patience_counter = 0
            for epoch in range(self.pretrain_epochs):
                # train
                self.SAE.train()
                p_loss = 0.0
                for i, (x_batch,) in enumerate(train_loader):
                    x_batch = Variable(x_batch).to(self.device)
                    # forward: SAE returns (encoding, reconstruction) here.
                    encoder_x, output = self.SAE(x_batch)
                    loss = self.pretrain_loss(output, x_batch)
                    # ===================backward====================
                    self.pretrain_optimizer.zero_grad()
                    loss.backward()
                    p_loss += loss
                    self.pretrain_optimizer.step()
                # ===================log========================
                # Mean batch loss; early stopping on the training loss itself.
                p_loss = p_loss.item() / (i + 1)
                if p_loss <= min_loss:
                    patience_counter = 0
                    min_loss = p_loss
                else:
                    patience_counter += 1
                if self.verbose and (epoch % self.verbose == 0):
                    print('Info: epoch [{}/{}], loss:{:.4f}'
                          .format(epoch + 1, self.pretrain_epochs, p_loss))
                if patience_counter >= self.patience:
                    break
            if self.save_pretrain_model:
                self.save_model()
        # step 2. finetune on the labeled subset only.
        self.SAE.train()
        # NOTE(review): the finetune loader is not shuffled — confirm intent.
        train_loader = self.create_dataloader(labeledX, labeled_y, batch_size=self.finetune_batch_size)
        if validation_data is not None:
            # NOTE(review): valid_y is only defined when y_valid is 1-dim;
            # a 2-dim y_valid would raise NameError below — confirm callers.
            if validation_data[1].ndim == 1:
                valid_y = self.onehot(validation_data[1], nclass)
            valid_loader = self.create_dataloader(validation_data[0], y=valid_y)
            min_loss = float('inf')
            patience_counter = 0
        for ep in range(self.finetune_epochs):
            self.SAE.train()
            e_loss = 0
            for i, (x_batch, y_batch) in enumerate(train_loader):
                x_batch = Variable(x_batch).to(self.device)
                # During finetuning the SAE returns (prediction, reconstruction).
                prediction, reconstruct = self.SAE(x_batch)
                # cross_entropy loss, (input,target)
                loss = self.finetune_loss(prediction, torch.argmax(y_batch, dim=1))
                self.finetune_optimizer.zero_grad()
                loss.backward()
                self.finetune_optimizer.step()
                e_loss += loss
            e_loss = e_loss.item() / (i + 1)
            if validation_data is not None:
                # Early stopping on the validation loss.
                valid_loss = self.predict_epoch(valid_loader)
                if valid_loss <= min_loss:
                    patience_counter = 0
                    min_loss = valid_loss
                else:
                    patience_counter += 1
                if self.verbose and (ep % self.verbose == 0):
                    print("Info: epoch:{}, loss:{:.4}, valid_loss:{:.4}".format(ep, e_loss, valid_loss))
                if patience_counter >= self.patience:
                    break
            else:
                if self.verbose and (ep % self.verbose == 0):
                    print("Info: epoch:{},loss:{:.4}".format(ep, e_loss))
        return self.SAE
    def predict_epoch(self, valid_loader):
        """Return the mean finetune (cross-entropy) loss over valid_loader.

        Batches must yield (features, one-hot targets); targets are converted
        to class indices via argmax for CrossEntropyLoss.
        """
        self.SAE.eval()
        e_loss = 0
        # NOTE(review): gradients are not disabled here; a torch.no_grad()
        # context would save memory — confirm before changing.
        for i, (x_batch, y_batch) in enumerate(valid_loader):
            x_batch = Variable(x_batch).to(self.device)
            prediction, reconstruct = self.SAE(x_batch)
            # cross_entropy loss, (input,target)
            loss = self.finetune_loss(prediction, torch.argmax(y_batch, dim=1))
            e_loss += loss
        valid_loss = e_loss.item() / (i + 1)
        return valid_loss
def predict(self, X_test):
"""
"""
test_preds = self.predict_proba(X_test)
pred = np.argmax(test_preds, axis=1)
return pred
    def predict_proba(self, X_test):
        """
        Return the SAE classifier outputs for X_test, stacked into an
        np.ndarray of shape (n_samples, n_outputs).

        NOTE(review): these are the network's raw outputs (logits unless the
        SAE ends in a softmax) — not guaranteed to be probabilities.
        """
        # eval test set
        test_preds = []
        test_loader = self.create_dataloader(X_test, shuffle=False)
        self.SAE.eval()
        for i, (x_batch,) in enumerate(test_loader):
            y_pred, reconstruct = self.SAE(x_batch)
            y_pred = y_pred.detach()
            test_preds.append(y_pred.cpu().numpy())
        test_preds = np.vstack(test_preds)
        return test_preds
def score(self, X, y, sample_weight=None):
return accuracy_score(y, self.predict(X), sample_weight=sample_weight) | [
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"numpy.argmax",
"torch.argmax",
"torch.autograd.Variable",
"torch.nn.CrossEntropyLoss",
"torch.save",
"numpy.max",
"torch.cuda.is_available",
"torch.utils.data.TensorDataset",
"torch.device",
"numpy.eye",
"torch.tensor",
"numpy.vstack"
] | [((1051, 1076), 'torch.device', 'torch.device', (['device_name'], {}), '(device_name)\n', (1063, 1076), False, 'import torch\n'), ((2072, 2084), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2082, 2084), False, 'from torch import nn\n'), ((2114, 2135), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2133, 2135), False, 'from torch import nn\n'), ((2619, 2682), 'torch.utils.data.DataLoader', 'DataLoader', (['tensor_data'], {'batch_size': 'batch_size', 'shuffle': 'shuffle'}), '(tensor_data, batch_size=batch_size, shuffle=shuffle)\n', (2629, 2682), False, 'from torch.utils.data import DataLoader, Dataset, TensorDataset\n'), ((3239, 3272), 'torch.save', 'torch.save', (['checkpoint', 'ckpt_file'], {}), '(checkpoint, ckpt_file)\n', (3249, 3272), False, 'import torch\n'), ((7767, 7796), 'numpy.argmax', 'np.argmax', (['test_preds'], {'axis': '(1)'}), '(test_preds, axis=1)\n', (7776, 7796), True, 'import numpy as np\n'), ((8261, 8282), 'numpy.vstack', 'np.vstack', (['test_preds'], {}), '(test_preds)\n', (8270, 8282), True, 'import numpy as np\n'), ((911, 936), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (934, 936), False, 'import torch\n'), ((2521, 2540), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X', 'y'], {}), '(X, y)\n', (2534, 2540), False, 'from torch.utils.data import DataLoader, Dataset, TensorDataset\n'), ((2581, 2597), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X'], {}), '(X)\n', (2594, 2597), False, 'from torch.utils.data import DataLoader, Dataset, TensorDataset\n'), ((2914, 2928), 'numpy.eye', 'np.eye', (['nclass'], {}), '(nclass)\n', (2920, 2928), True, 'import numpy as np\n'), ((3626, 3635), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (3632, 3635), True, 'import numpy as np\n'), ((2882, 2894), 'numpy.max', 'np.max', (['narr'], {}), '(narr)\n', (2888, 2894), True, 'import numpy as np\n'), ((7520, 7548), 'torch.argmax', 'torch.argmax', (['y_batch'], {'dim': '(1)'}), '(y_batch, 
dim=1)\n', (7532, 7548), False, 'import torch\n'), ((2444, 2478), 'torch.tensor', 'torch.tensor', (['y'], {'dtype': 'torch.float'}), '(y, dtype=torch.float)\n', (2456, 2478), False, 'import torch\n'), ((6193, 6221), 'torch.argmax', 'torch.argmax', (['y_batch'], {'dim': '(1)'}), '(y_batch, dim=1)\n', (6205, 6221), False, 'import torch\n'), ((7331, 7348), 'torch.autograd.Variable', 'Variable', (['x_batch'], {}), '(x_batch)\n', (7339, 7348), False, 'from torch.autograd import Variable\n'), ((5992, 6009), 'torch.autograd.Variable', 'Variable', (['x_batch'], {}), '(x_batch)\n', (6000, 6009), False, 'from torch.autograd import Variable\n'), ((4263, 4280), 'torch.autograd.Variable', 'Variable', (['x_batch'], {}), '(x_batch)\n', (4271, 4280), False, 'from torch.autograd import Variable\n')] |
#execute: python3 script_path image_path min_wavelet_level max_wavelet_level erosion_times output0 output1
import numpy as np
import pandas as pd
import pywt,cv2,sys,subprocess,homcloud,os
import matplotlib.pyplot as plt
import homcloud.interface as hc
args = sys.argv
image_path = args[1]  # jpg file
# BUG FIX: sys.argv values are strings, but preprocess() compares the wavelet
# levels against integer loop indices and passes erosion_times to
# cv2.erode(iterations=...). Coerce the numeric arguments to int here.
min_wavelet_level = int(args[2])
max_wavelet_level = int(args[3])
erosion_times = int(args[4])
output0 = args[5]  # txt file
output1 = args[6]  # txt file
def preprocess(image_path, min_wavelet_level, max_wavelet_level, erosion_times):
    """Binarize an image after band-passing its Haar wavelet decomposition.

    :param image_path: path to the input image (read with cv2).
    :param min_wavelet_level: lowest wavelet level to keep (int, or int-like string).
    :param max_wavelet_level: highest wavelet level to keep (int, or int-like string).
    :param erosion_times: number of erosion iterations (int, or int-like string).
    :return: eroded binary image (uint8).
    """
    # BUG FIX / robustness: this script passes sys.argv values straight in as
    # strings; the comparisons below (i < min_wavelet_level) and
    # cv2.erode(iterations=...) require ints, so coerce here.
    min_wavelet_level = int(min_wavelet_level)
    max_wavelet_level = int(max_wavelet_level)
    erosion_times = int(erosion_times)
    imArray = cv2.imread(image_path)
    # trim the image to 1200*1400
    imArray = imArray[0:1200, 0:1400]
    # transform to grayscale
    imArray = cv2.cvtColor(imArray, cv2.COLOR_BGR2GRAY)
    # transform to float (0~1)
    imArray = np.float32(imArray)
    imArray /= 255
    # calculate wavelet coefficients (Haar base)
    mode = "haar"
    coeffs = pywt.wavedec2(imArray, mode, level=10)
    # abandon coefficients of the levels outside [min, max]
    coeffs_H = list(coeffs)
    if 0 < min_wavelet_level:
        coeffs_H[0] *= 0
    for i in range(11):
        if (i < min_wavelet_level or i > max_wavelet_level):
            coeffs_H[i] = tuple([np.zeros_like(v) for v in coeffs_H[i]])
    # reconstruct the image
    imArray_H = pywt.waverec2(coeffs_H, mode)
    imArray_H *= 255
    imArray_H = np.uint8(imArray_H)
    # binarize the image using Otsu's method
    _, thr = cv2.threshold(imArray_H, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # morphological operations: erode the white region several times
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    binary_image = cv2.erode(thr, kernel, iterations=erosion_times)
    return (binary_image)
def homcloud_binary(binary_image, output0, output1):
    """Compute 0- and 1-dim persistence diagrams of a binary image and write
    them (birth, death, midlife, lifetime) as tab-separated files.
    """
    #get the locations of white pixels
    white_region = binary_image > 128
    #execute filtration on the signed distance transform
    diag = hc.PDList.from_bitmap_levelset(hc.distance_transform(white_region, signed=True))
    #get the 0-dim persistence diagram
    p0 = diag.dth_diagram(0)
    #calculate mid-life and life-time
    p0_diag = np.vstack([p0.births, p0.deaths, (p0.births+p0.deaths)/2, np.abs(p0.births-p0.deaths)]).transpose()
    p0_df = pd.DataFrame(p0_diag, columns=["birth","death","midlife", "lifetime"])
    #output
    p0_df.to_csv(path_or_buf=output0, sep="\t", index=False)
    #get the 1-dim persistence diagram
    p1 = diag.dth_diagram(1)
    #calculate mid-life and life-time
    p1_diag = np.vstack([p1.births, p1.deaths, (p1.births+p1.deaths)/2, np.abs(p1.births-p1.deaths)]).transpose()
    #assemble the output table
    p1_df = pd.DataFrame(p1_diag, columns=["birth","death","midlife", "lifetime"])
    #output
    p1_df.to_csv(path_or_buf=output1, sep="\t", index=False)
binary_image = preprocess(image_path, min_wavelet_level, max_wavelet_level, erosion_times)
homcloud_binary(binary_image, output0, output1) | [
"pandas.DataFrame",
"numpy.uint8",
"numpy.zeros_like",
"numpy.abs",
"cv2.cvtColor",
"cv2.getStructuringElement",
"numpy.float32",
"cv2.threshold",
"homcloud.interface.distance_transform",
"cv2.imread",
"pywt.wavedec2",
"cv2.erode",
"pywt.waverec2"
] | [((547, 569), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (557, 569), False, 'import pywt, cv2, sys, subprocess, homcloud, os\n'), ((670, 711), 'cv2.cvtColor', 'cv2.cvtColor', (['imArray', 'cv2.COLOR_BGR2GRAY'], {}), '(imArray, cv2.COLOR_BGR2GRAY)\n', (682, 711), False, 'import pywt, cv2, sys, subprocess, homcloud, os\n'), ((751, 770), 'numpy.float32', 'np.float32', (['imArray'], {}), '(imArray)\n', (761, 770), True, 'import numpy as np\n'), ((858, 896), 'pywt.wavedec2', 'pywt.wavedec2', (['imArray', 'mode'], {'level': '(10)'}), '(imArray, mode, level=10)\n', (871, 896), False, 'import pywt, cv2, sys, subprocess, homcloud, os\n'), ((1184, 1213), 'pywt.waverec2', 'pywt.waverec2', (['coeffs_H', 'mode'], {}), '(coeffs_H, mode)\n', (1197, 1213), False, 'import pywt, cv2, sys, subprocess, homcloud, os\n'), ((1246, 1265), 'numpy.uint8', 'np.uint8', (['imArray_H'], {}), '(imArray_H)\n', (1254, 1265), True, 'import numpy as np\n'), ((1316, 1385), 'cv2.threshold', 'cv2.threshold', (['imArray_H', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(imArray_H, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (1329, 1385), False, 'import pywt, cv2, sys, subprocess, homcloud, os\n'), ((1435, 1485), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_CROSS', '(3, 3)'], {}), '(cv2.MORPH_CROSS, (3, 3))\n', (1460, 1485), False, 'import pywt, cv2, sys, subprocess, homcloud, os\n'), ((1539, 1587), 'cv2.erode', 'cv2.erode', (['thr', 'kernel'], {'iterations': 'erosion_times'}), '(thr, kernel, iterations=erosion_times)\n', (1548, 1587), False, 'import pywt, cv2, sys, subprocess, homcloud, os\n'), ((2064, 2136), 'pandas.DataFrame', 'pd.DataFrame', (['p0_diag'], {'columns': "['birth', 'death', 'midlife', 'lifetime']"}), "(p0_diag, columns=['birth', 'death', 'midlife', 'lifetime'])\n", (2076, 2136), True, 'import pandas as pd\n'), ((2456, 2528), 'pandas.DataFrame', 'pd.DataFrame', (['p1_diag'], {'columns': "['birth', 'death', 
'midlife', 'lifetime']"}), "(p1_diag, columns=['birth', 'death', 'midlife', 'lifetime'])\n", (2468, 2528), True, 'import pandas as pd\n'), ((1797, 1845), 'homcloud.interface.distance_transform', 'hc.distance_transform', (['white_region'], {'signed': '(True)'}), '(white_region, signed=True)\n', (1818, 1845), True, 'import homcloud.interface as hc\n'), ((1109, 1125), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (1122, 1125), True, 'import numpy as np\n'), ((2013, 2042), 'numpy.abs', 'np.abs', (['(p0.births - p0.deaths)'], {}), '(p0.births - p0.deaths)\n', (2019, 2042), True, 'import numpy as np\n'), ((2370, 2399), 'numpy.abs', 'np.abs', (['(p1.births - p1.deaths)'], {}), '(p1.births - p1.deaths)\n', (2376, 2399), True, 'import numpy as np\n')] |
"""Lightweight transformer to parse and augment US zipcodes with info from zipcode database."""
from h2oaicore.transformer_utils import CustomTransformer
import datatable as dt
import numpy as np
from abc import ABC, abstractmethod
_global_modules_needed_by_name = ['zipcodes==1.0.5']
import zipcodes
class ZipcodeLightBaseTransformer(ABC):
    """Base transformer: maps a single US zipcode column to one property
    looked up in the `zipcodes` package database."""

    @staticmethod
    def get_default_properties():
        return dict(col_type="categorical", min_cols=1, max_cols=1, relative_importance=1)

    @abstractmethod
    def get_property_name(self):
        """Return the key of the zipcode-database property to extract."""
        raise NotImplementedError

    def get_zipcode_property(self, zipcode_obj):
        # None (unparseable zipcode) propagates through as None.
        if zipcode_obj is None:
            return None
        else:
            return zipcode_obj[self.get_property_name()]

    def parse_zipcode(self, value):
        """Look up `value` in the zipcode database; return the first match,
        or None when the value is malformed or unknown."""
        try:
            result = zipcodes.matching(value)
        except ValueError:
            # Malformed zipcode string.
            return None
        # BUG FIX: the original required len(result) > 1, so the common
        # single-match case returned None. Accept any non-empty result.
        # (A TypeError from a non-string value now propagates unchanged
        # instead of being replaced by a bare, message-less TypeError.)
        if len(result) >= 1:
            return result[0]
        return None

    def fit_transform(self, X: dt.Frame, y: np.array = None):
        return self.transform(X)

    def transform(self, X: dt.Frame):
        try:
            X = dt.Frame(X)
            X.names = ['zip_key']
            X = X[:, str('zip_key')]
            zip_list = dt.unique(X[~dt.isna(dt.f.zip_key), 0]).to_list()[0]
            zip_features = [self.get_zipcode_property(self.parse_zipcode(x)) for x in zip_list]
            X_g = dt.Frame({"zip_key": zip_list, self.get_property_name(): zip_features})
            X_g.key = 'zip_key'
            X_result = X[:, :, dt.join(X_g)]
            return X_result[:, 1:]
        except Exception:
            # Best-effort fallback: return a zero column rather than failing
            # the whole experiment. (Was a bare `except:`.)
            return np.zeros(X.shape[0])
class ZipcodeTypeTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    # BUG FIX (applies to all subclasses below): the base class calls
    # self.get_property_name() with no arguments, so the extra `value`
    # parameter made every lookup raise TypeError. The signature now matches
    # the abstract method get_property_name(self).
    def get_property_name(self):
        return 'zip_code_type'
class ZipcodeCityTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'city'
class ZipcodeStateTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'state'
class ZipcodeLatitudeTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'lat'
class ZipcodeLongitudeTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'long'
class ZipcodeIsActiveTransformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'active'
class Zipcode5Transformer(ZipcodeLightBaseTransformer, CustomTransformer):
    def get_property_name(self):
        return 'zip_code'
| [
"numpy.zeros",
"datatable.Frame",
"zipcodes.matching",
"datatable.join",
"datatable.isna"
] | [((824, 848), 'zipcodes.matching', 'zipcodes.matching', (['value'], {}), '(value)\n', (841, 848), False, 'import zipcodes\n'), ((1231, 1242), 'datatable.Frame', 'dt.Frame', (['X'], {}), '(X)\n', (1239, 1242), True, 'import datatable as dt\n'), ((1723, 1743), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (1731, 1743), True, 'import numpy as np\n'), ((1639, 1651), 'datatable.join', 'dt.join', (['X_g'], {}), '(X_g)\n', (1646, 1651), True, 'import datatable as dt\n'), ((1350, 1371), 'datatable.isna', 'dt.isna', (['dt.f.zip_key'], {}), '(dt.f.zip_key)\n', (1357, 1371), True, 'import datatable as dt\n')] |
from tqdm import trange, tqdm
import numpy as np
from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh
import logging
log = logging.getLogger('log')
# Simulates a linear dipole imaged by 4f detection system.
class FourF:
def __init__(self, NA=1.2, M=60, n0=1.3, lamb=0.546, wpx_real=6.5,
npx=(7*17, 7*17), ss=2, plotfov=10, irrad_title='4$f$ detector irradiance'):
# Input parameters
self.NA = NA
self.M = M # magnification
self.n0 = n0 # object index of refraction
self.lamb = lamb # wavelength
self.npx = npx # number of pixels on detector
self.ss = ss # super-sample factor for each pixel
self.wpx_real = wpx_real # pixel width (from camera data sheet)
# Plotting parameters
self.plotfov = plotfov # detctor region plotted
self.irrad_title = irrad_title
# Derived parameters
self.npxss = npx[0]*ss
self.wpx = self.wpx_real/M # pixel width in object space
self.fov = self.wpx*self.npx[0] # demagnified FOV
self.nuc = 2*NA/lamb # transverse cutoff frequency
self.num = n0/lamb # Ewald sphere radius
self.wbfp = 1/(self.wpx*self.npx[0]) # sample spacing in BFP
# FOV in bfp
if self.npxss % 2 == 0:
self.bfpmin = -0.5*self.npxss*self.wbfp
self.bfpmax = (0.5*self.npxss-1)*self.wbfp
else:
self.bfpmin = -0.5*(self.npxss-1)*self.wbfp
self.bfpmax = 0.5*(self.npxss-1)*self.wbfp
    # ----------
    # To back focal plane
    # Generates the electric field pattern in the bfp due to a single dipole
    # Input: R3S2toR.xyzj_list with a single entry
    # Output: R2toC2.xy object
    # Based on: Backer & Moerner (2014) — citation name lost in extraction
    # http://dx.doi.org/10.1021/jp501778z
    def xyzj_single_to_xye_bfp(self, dip):
        # Unpack the single dipole: position and orientation vector.
        dip_pos = dip.data_xyz[0]
        dip_orientation = dip.data_j[0]
        # Precompute self.h_xyzJ_single_to_xye_bfp for this position.
        self.precompute_xyzJ_single_to_xye_bfp(dip_pos)
        # Contract the (npxss, npxss, 2, 3) transfer tensor with the
        # 3-vector orientation to get the 2-component BFP field.
        out = np.einsum('ijkl,l->ijk', self.h_xyzJ_single_to_xye_bfp, dip_orientation)
        # Return BFP fields wrapped for plotting.
        return R2toC2.xy(out,
                   circle=True, title='Scaled back focal plane fields',
                   xlabel='$\\textrm{2NA}/\lambda$',
                   toplabel='$E_x$', bottomlabel='$E_y$',
                   fov=[self.bfpmin, self.bfpmax],
                   plotfov=[-self.nuc/2, self.nuc/2],
                   colormax=1.5)
    def precompute_xyzJ_single_to_xye_bfp(self, dip_pos):
        """Precompute the (npxss, npxss, 2, 3) complex transfer tensor mapping
        a dipole orientation at position dip_pos to BFP (Ex, Ey) fields.
        Stored in self.h_xyzJ_single_to_xye_bfp."""
        rx, ry, rz = dip_pos
        # Transverse spatial-frequency coordinates over the BFP grid.
        x = np.linspace(self.bfpmin, self.bfpmax, self.npxss)
        y = np.linspace(self.bfpmin, self.bfpmax, self.npxss)
        taux, tauy = np.meshgrid(x, y, indexing='ij')
        tauphi = np.arctan2(tauy, taux)
        abstau = np.sqrt(taux**2 + tauy**2)
        rho = abstau/self.num
        # Apodization (pupil mask at the cutoff nuc/2) and phase terms:
        # tphase from lateral offset (rx, ry), aphase (defocus) from rz.
        apod = np.where(abstau < self.nuc/2, 1, 0)
        sine_apod = (1 - apod*rho**2)**(-0.25)
        sqrtrho = np.sqrt(1 - apod*rho**2)
        tphase = np.exp(1j*2*np.pi*(taux*rx + tauy*ry))
        aphase = np.exp(1j*2*np.pi*rz*np.sqrt(self.num**2 - apod*abstau**2))
        pre = apod*sine_apod*tphase*aphase
        # Compute matrix elements (g tensor); note gxy symmetry is reused.
        self.h_xyzJ_single_to_xye_bfp = np.zeros((self.npxss, self.npxss, 2, 3), dtype='complex64')
        self.h_xyzJ_single_to_xye_bfp[:,:,0,0] = (np.sin(tauphi))**2 + (np.cos(tauphi))**2*sqrtrho # gxx
        self.h_xyzJ_single_to_xye_bfp[:,:,0,1] = 0.5*np.sin(2*tauphi)*(sqrtrho - 1) # gxy
        self.h_xyzJ_single_to_xye_bfp[:,:,0,2] = rho*np.cos(tauphi) # gxz
        self.h_xyzJ_single_to_xye_bfp[:,:,1,0] = self.h_xyzJ_single_to_xye_bfp[:,:,0,1] # gxy
        self.h_xyzJ_single_to_xye_bfp[:,:,1,1] = (np.cos(tauphi))**2 + (np.sin(tauphi))**2*sqrtrho # gyy
        self.h_xyzJ_single_to_xye_bfp[:,:,1,2] = rho*np.sin(tauphi) # gyz
        # Apply the combined apodization/phase prefactor to every element.
        self.h_xyzJ_single_to_xye_bfp = np.einsum('ijkl,ij->ijkl', self.h_xyzJ_single_to_xye_bfp, pre)
        # TODO: Add monopole option flag
        # # Monopole
        # if sx == 0 and sy == 0 and sz == 0:
        #     self.h_xyzJ_single_to_xye_bfp = np.einsum('ijkl,ij->ijkl', self.h_xyzJ_single_to_xye_bfp, pre)
    # ----------
    # To detector
    # Propagates the electric fields from the bfp to the image plane
    # Input: R2toC2.xy object
    # Output: R2toC2.xy object
    def xye_bfp_to_xye_det(self, ebfp):
        # For an even grid, apply a half-sample phase ramp so the FFT output
        # is centered consistently with the odd-grid case.
        if self.npxss % 2 == 0:
            x = np.arange(-self.npxss//2, self.npxss//2)
            y = np.arange(-self.npxss//2, self.npxss//2)
            xx, yy = np.meshgrid(x, y)
            phase_ramp = np.exp(-1j*np.pi*(xx + yy)/self.npxss)
            to_ft = np.einsum('ijk,ij->ijk', ebfp.data, phase_ramp)
        else:
            to_ft = ebfp.data
        # 2D FFT over the spatial axes; wbfp**2 is the frequency-domain
        # sample area (discrete approximation of the Fourier integral).
        shifted = np.fft.ifftshift(to_ft, axes=(0,1))
        ffted = np.fft.fft2(shifted, axes=(0,1))*(self.wbfp**2)
        result = np.fft.fftshift(ffted, axes=(0,1))
        return R2toC2.xy(result,
                   fov=[-self.fov/2, self.fov/2],
                   plotfov=[-self.plotfov/2, self.plotfov/2],
                   title='Image plane fields',
                   xlabel=str(self.plotfov)+' $\mu$m',
                   toplabel='$E_x$', bottomlabel='$E_y$')
# Generates the electric field pattern on the detector due to a single dipole
# Input: R3S2toR.xyzj_list with a single entry
# Output: R2toC2.xy object
def xyzj_single_to_xye_det(self, dip):
import pdb; pdb.set_trace()
dip_pos = dip.data_xyz[0]
dip_orientation = dip.data_j[0]
# Precompute self.h_xyzJ_single_to_xye
self.precompute_xyzJ_single_to_xye_det(dip_pos)
# Matrix multiplication
out = np.einsum('ijkl,l->ijk', self.h_xyzJ_single_to_xye_det, dip_orientation)
return R2toC2.xy(out,
fov=[-self.fov/2, self.fov/2],
plotfov=[-self.plotfov/2, self.plotfov/2],
title='Image plane fields',
xlabel=str(self.plotfov)+' $\mu$m',
toplabel='$E_x$', bottomlabel='$E_y$')
    def precompute_xyzJ_single_to_xye_det(self, dip):
        """Precompute the detector-plane transfer tensor by propagating each
        of the three BFP tensor columns (x, y, z dipoles) to the image plane.
        Stored in self.h_xyzJ_single_to_xye_det."""
        self.precompute_xyzJ_single_to_xye_bfp(dip)
        self.h_xyzJ_single_to_xye_det = np.zeros((self.npxss, self.npxss, 2, 3), dtype='complex64')
        for i in range(3):
            # Propagate each dipole-component field separately.
            temp_bfp = R2toC2.xy(self.h_xyzJ_single_to_xye_bfp[...,i])
            self.h_xyzJ_single_to_xye_det[...,i] = self.xye_bfp_to_xye_det(temp_bfp).data
    # Generates irradiance from electric fields
    # Input: R2toC2.xy object
    # Output: R2toR.xy object
    def xye_to_xy_det(self, e):
        # Irradiance: |Ex|^2 + |Ey|^2 summed over the field components.
        irr = np.sum(np.abs(e.data)**2, axis=-1)
        # "Undo" supersampling by summing each ss x ss square into one pixel.
        irrpx = irr.reshape(self.npx[0], self.ss, self.npx[1], self.ss).sum(axis=(1,3))
        # Return
        return R2toR.xy(irrpx,
                  title=self.irrad_title,
                  fov=[-self.fov/2, self.fov/2],
                  plotfov=[-self.plotfov/2, self.plotfov/2])
# Generates the irradiance pattern due to a single dipole
# This is an abstraction over the main functions in this class
# Input: R3S2toR.xyzj_single with a single dipole
# Output: R2toC2.xy object
def xyzj_single_to_xy_det(self, dip): # dip_to_det
eim = self.xyzj_single_to_xye_det(dip)
return self.xye_to_xy_det(eim)
    # Generates the irradiance pattern due to a single dipole distribution
    # Input: R3S2toR.xyzJ_list with single entry
    # Output: R2toR.xy object
    def xyzJ_single_to_xy_det(self, dist):
        # Precompute the (npx, npx, 6) irradiance transfer tensor at this
        # position, then contract with the distribution's 6-vector J.
        self.precompute_xyzJ_single_to_xy_det(dist.data_xyz[0])
        out = np.einsum('ijk,k->ij', self.h_xyzJ_single_to_xy_det, dist.data_J[0])
        return R2toR.xy(out,
                  title=self.irrad_title,
                  fov=[-self.fov/2, self.fov/2],
                  plotfov=[-self.plotfov/2, self.plotfov/2])
    def precompute_xyzJ_single_to_xy_det(self, dist):
        """Precompute the (npx, npx, 6) real irradiance transfer tensor for a
        dipole distribution at position `dist` (an xyz triple).
        Stored in self.h_xyzJ_single_to_xy_det."""
        self.precompute_xyzJ_single_to_xye_det(dist)
        # NOTE(review): this zeros allocation is immediately overwritten by
        # the reshape/sum below — it only documents the output shape.
        self.h_xyzJ_single_to_xy_det = np.zeros((self.npx[0], self.npx[1], 6), dtype='float32')
        # Compute gaunt coeffs (couples l=1 x l=1 products into l=0, l=2).
        G = utilsh.gaunt_l1l1_tol0l2()
        # Compute matrix: |E|^2-type bilinear contraction of the field tensor.
        out = np.real(np.einsum('ijkl,ijkm,nlm->ijn',
                                  self.h_xyzJ_single_to_xye_det,
                                  self.h_xyzJ_single_to_xye_det.conj(),
                                  G)).astype('float32')
        # Downsample (undo supersampling) and store
        self.h_xyzJ_single_to_xy_det = out.reshape(self.npx[0], self.ss, self.npx[1], self.ss, 6).sum(axis=(1,3))
    # Generates the irradiance pattern due to several dipole distributions
    # This is a slow path for dense objects.
    # Input: R3S2toR.xyzJ_list
    # Output: R2toR.xy object
    def xyzJ_list_to_xy_det(self, dist):
        # Sum single-distribution irradiances; incoherent addition.
        out = np.zeros(self.npx)
        for m in range(dist.M):
            distm = R3S2toR.xyzJ_list([dist.data_xyz[m]], [dist.data_J[m]])
            out += self.xyzJ_single_to_xy_det(distm).data
        return R2toR.xy(out,
                  title=self.irrad_title,
                  fov=[-self.fov/2, self.fov/2],
                  plotfov=[-self.plotfov/2, self.plotfov/2])
    # Generates the irradiance pattern due to a dense xyzJ array.
    # This is a faster path than xyzJ_list_to_xy_det
    # Input: R3S2toR.xyzJ
    # Output: R2toR.xy object
    def xyzJ_to_xy_det(self, xyzJ):
        # Assume input xy dimensions are <= detector xy dimensions
        if xyzJ.data.shape[0:2] != self.npx:
            log.info('Error! The xy dimensions of the xyzJ object must match the xy detector dimensions.')
            return None
        # NOTE(review): the precompute call is commented out, so the caller
        # must have populated self.H_XYzJ_to_XY beforehand — confirm intent.
        # self.precompute_XYzJ_to_XY_det(xyzJ.data.shape, xyzJ.vox_dims)
        xyzJ_shift = np.fft.ifftshift(xyzJ.data, axes=(0,1))
        XYzJ_shift = np.fft.rfft2(xyzJ_shift, axes=(0,1)) # FT along xy
        XY_shift = np.einsum('ijkl,ijkl->ij', self.H_XYzJ_to_XY, XYzJ_shift) # Filter and sum over J and z
        xy_shift = np.fft.irfft2(XY_shift, s=xyzJ.data.shape[0:2]) # IFT along XY
        xy = np.fft.fftshift(xy_shift)
        return R2toR.xy(np.real(xy),
                  px_dims=(self.wpx, self.wpx),
                  title=self.irrad_title,
                  fov=[-self.fov/2, self.fov/2],
                  plotfov=[-self.plotfov/2, self.plotfov/2],
                  xlabel=str(self.plotfov)+' $\mu$m')
def precompute_XYzJ_to_XY_det(self, input_shape, input_vox_dims):
# Adjust matrix size to account for rfft
input_shape_rfft = np.array(input_shape)
input_shape_rfft[1] = np.floor(input_shape_rfft[1]/2 + 1)
# Fill matrix
log.info('Computing psfs')
self.H_XYzJ_to_XY = np.zeros(input_shape_rfft, dtype=np.complex64)
oddK = (input_shape[2]%2 == 1)
for k in trange(int(input_shape[2]/2 + 0.5*oddK)):
z = (k + 0.5*oddK - 0.5*input_shape[2])*input_vox_dims[2] # k2z
self.precompute_xyzJ_single_to_xy_det([0,0,z])
h_shift = np.fft.ifftshift(self.h_xyzJ_single_to_xy_det, axes=(0,1))
# Exploit symmetry above and below the focal plane
self.H_XYzJ_to_XY[:,:,k,:6] = np.fft.rfft2(h_shift, axes=(0,1))*np.product(input_vox_dims) # For anisometric voxels
self.H_XYzJ_to_XY[:,:,-k,:6] = self.H_XYzJ_to_XY[:,:,k,:6]
# Simulates a linear dipole imaged by 4f system with a microlens array.
# Depends on FourF class.
class FourFLF:
def __init__(self, fulen=2500, ulenpx=(2**4 + 1),
ulens_aperture='square',
**kwargs):
self.fourf = FourF(**kwargs)
self.fulen = fulen # ulens focal length
self.ulenpx = ulenpx # number of pixels behind each ulens
self.ulens_aperture = ulens_aperture # 'square' or 'circle'
self.nulen = int(self.fourf.npx[0]/ulenpx)
    # Generates detector fields (after microlenses) due to a single dipole
    # Input: R3S2toR.xyzj_list with a single entry
    # Output: R2toC2.xy object
    def xyzj_single_to_xye_det(self, dip):
        # Unpack the single dipole: position and orientation vector.
        dip_pos = dip.data_xyz[0]
        dip_orientation = dip.data_j[0]
        # Precompute self.h_xyzJ_single_to_xye_det for this position.
        self.precompute_xyzJ_single_to_xye_det(dip_pos)
        # Contract the transfer tensor with the dipole orientation.
        out = np.einsum('ijkl,l->ijk', self.h_xyzJ_single_to_xye_det, dip_orientation)
        return R2toC2.xy(out,
                   fov=[-self.fourf.fov/2, self.fourf.fov/2],
                   plotfov=[-self.fourf.plotfov/2, self.fourf.plotfov/2],
                   title='Lightfield detector fields',
                   xlabel=str(self.fourf.plotfov)+' $\mu$m',
                   toplabel='$E_x$', bottomlabel='$E_y$')
def precompute_xyzJ_single_to_xye_det(self, dip_pos):
self.h_xyzJ_single_to_xye_det = np.zeros((self.fourf.npxss, self.fourf.npxss, 2, 3), dtype='complex64')
# Use FourF to calculate electric field in nominal image plane
# eim = self.fourf.xyzj_single_to_xye_det(dip)
self.fourf.precompute_xyzJ_single_to_xye_det(dip_pos)
# For x, y, z dipoles
for i in range(3):
eim = R2toC2.xy(self.fourf.h_xyzJ_single_to_xye_det[...,i])
# Build microlens tile
xmin = -0.5*(self.ulenpx-1)*self.fourf.wpx_real
xmax = 0.5*(self.ulenpx-1)*self.fourf.wpx_real
x = np.linspace(xmin, xmax, self.ulenpx*self.fourf.ss)
xx, yy = np.meshgrid(x, x)
ulentile = np.exp(-1j*np.pi*(xx**2 + yy**2)/(self.fulen*self.fourf.lamb))
if self.ulens_aperture == 'circle':
rr = np.sqrt(xx**2 + yy**2)
ulentile *= np.where(rr < xmax, 1, 0)
# Apply microlens phase
tiled = np.tile(ulentile, 2*(self.fourf.npx[0]//self.ulenpx,))
Eout = np.einsum('ijk,ij->ijk', eim.data, tiled)
# Fresnel propagation to detector
nu = np.fft.fftfreq(self.fourf.npxss, self.fourf.wpx_real/self.fourf.ss)
nuxx, nuyy = np.meshgrid(nu, nu)
H = np.exp(-1j*np.pi*self.fourf.lamb*self.fulen*(nuxx**2 + nuyy**2))
fft2 = np.fft.fft2(np.fft.fftshift(Eout, axes=(0,1)), axes=(0,1))
filtered = np.einsum('ijk,ij->ijk', fft2, H)
ifft2 = np.fft.ifftshift(np.fft.ifft2(filtered, axes=(0,1)), axes=(0,1))
self.h_xyzJ_single_to_xye_det[...,i] = ifft2
# Generates detector irradiances (after microlenses) due to a single dipole
# Input: R3S2toR.xyzj_list with a single entry
# Output: R2toR.xy object
def xyzj_single_to_xy_det(self, dip):
edet = self.xyzj_single_to_xye_det(dip)
return self.fourf.xye_to_xy_det(edet)
# Generates the irradiance pattern due to a single dipole distribution
# Input: R3S2toR.xyzJ_list with a single entry
# Output: R2toR.xy object
def xyzJ_single_to_xy_det(self, dist):
self.precompute_xyzJ_single_to_xy_det(dist.data_xyz[0])
out = np.einsum('ijk,k->ij', self.h_xyzJ_single_to_xy_det, dist.data_J[0])
return R2toR.xy(out,
title='Lightfield detector irradiance',
fov=[-self.fourf.fov/2, self.fourf.fov/2],
plotfov=[-self.fourf.plotfov/2, self.fourf.plotfov/2])
def precompute_xyzJ_single_to_xy_det(self, dist):
self.precompute_xyzJ_single_to_xye_det(dist)
self.h_xyzJ_single_to_xy_det = np.zeros((self.fourf.npx[0], self.fourf.npx[1], 6), dtype='float32')
# Compute gaunt coeffs
G = utilsh.gaunt_l1l1_tol0l2()
# Compute matrix
out = np.real(np.einsum('ijkl,ijkm,nlm->ijn',
self.h_xyzJ_single_to_xye_det,
self.h_xyzJ_single_to_xye_det.conj(),
G)).astype('float32')
# Downsample and store
self.h_xyzJ_single_to_xy_det = out.reshape(self.fourf.npx[0], self.fourf.ss, self.fourf.npx[1], self.fourf.ss, 6).sum(axis=(1,3))
# Generates the irradiance pattern due to several dipole distributions
# This is a slow path for dense objects.
# Input: R3S2toR.xyzJ_list
# Output: R2toR.xy object
def xyzJ_list_to_xy_det(self, dist):
out = np.zeros(self.fourf.npx)
for m in range(dist.M):
distm = R3S2toR.xyzJ_list([dist.data_xyz[m]], [dist.data_J[m]])
out += self.xyzJ_single_to_xy_det(distm).data
return R2toR.xy(out,
title=self.fourf.irrad_title,
fov=[-self.fourf.fov/2, self.fourf.fov/2],
plotfov=[-self.fourf.plotfov/2, self.fourf.plotfov/2])
# Generates the irradiance pattern on a lfcamera from a dense xyzJ array.
# Alternate name for xyzJ_to_xy_det
# This is a faster path than xyzJ_list_to_xy_det
# Input: R3S2toR.xyzJ
# Output: R2toR.xy object
def fwd(self, xyzJ):
log.info('Applying forward operator')
# Reshape xyzJ to uvstzJ
uu, vv, ss, tt, zz, jj, vp, tp = self.H_UvStzJ_to_UvSt_det.shape
uvstzJ_shape = (uu, vp, uu, tp, zz, jj)
uvstzJ = xyzJ.data.reshape(uvstzJ_shape)
# FFT uvstzJ to UvStzJ (with Fourier shifting)
uvstzJ_shift = np.fft.ifftshift(uvstzJ, axes=(0,2))
UvStzJ_shift = np.fft.rfftn(uvstzJ_shift, axes=(0,2))
# Forward model matrix multiplication
# Sum over mnop
UvSt_shift = np.einsum('ijklmnop,iokpmn->ijkl', self.H_UvStzJ_to_UvSt_det, UvStzJ_shift)
# FFT UvSt to uvst (with Fourier deshifting)
uvst_shift = np.fft.irfftn(UvSt_shift, s=(uu,uu), axes=(0,2))
uvst = np.fft.fftshift(uvst_shift, axes=(0,2))
# Reshape uvst to xy
xy = uvst.reshape(self.fourf.npx[0], self.fourf.npx[0])
return R2toR.xy(xy,
px_dims=2*(xyzJ.vox_dims[0]*vp/vv,),
title=self.fourf.irrad_title,
fov=[-self.fourf.fov/2, self.fourf.fov/2],
plotfov=[-self.fourf.fov/2, self.fourf.fov/2])
def precompute_fwd(self, input_shape, input_vox_dims):
# Precompute UvStzJ_to_UvSt_det
uu, vv, ss, tt, zz, jj = input_shape
matrix_shape = (uu, self.ulenpx, int(np.floor(ss/2 + 1)), self.ulenpx, zz, jj, vv, tt)
self.H_UvStzJ_to_UvSt_det = np.zeros(matrix_shape, dtype=np.complex64)
log.info('Computing psfs')
for i in trange(input_shape[1], desc='Outer loop'):
for j in trange(input_shape[3], desc='Inner loop', leave=False):
for k in range(input_shape[4]):
x = (i + 0.5 - 0.5*input_shape[1])*input_vox_dims[0] # i2x
y = (j + 0.5 - 0.5*input_shape[3])*input_vox_dims[1] # j2y
z = (k + 0.5 - 0.5*input_shape[4])*input_vox_dims[2] # k2z
self.precompute_xyzJ_single_to_xy_det([x,y,z])
h_uvstJ = self.h_xyzJ_single_to_xy_det.reshape((uu,self.ulenpx,ss,self.ulenpx,jj))
h_shift = np.fft.ifftshift(h_uvstJ, axes=(0,2))
entry = np.fft.rfft2(h_shift, axes=(0,2))
self.H_UvStzJ_to_UvSt_det[:,:,:,:,k,:6,i,j] = entry
# Generates the pseudoinverse solution
# Requires self.Hp and self.H_UvStzJ_to_UvSt_det to have been precomputed
# Input: R3S2toR.xy
# Output: R2toR.xyzJ object
def pinv(self, xy, out_vox_dims=[.1,.1,.1]):
log.info('Applying pseudoinverse operator')
U, v, S, t, z, J, vp, tp = self.H_UvStzJ_to_UvSt_det.shape
# Resort and FT data
uvst = xy.data.reshape((U,v,U,t)).astype('float32')
uvst_shift = np.fft.ifftshift(uvst, axes=(0,2))
UvSt_shift = np.fft.rfftn(uvst_shift, axes=(0,2))
# Apply pinv operator
UvStzJ_shift = np.einsum('ikjlopqr,ijkl->iokpqr', self.Hp, UvSt_shift)
# UvStzJ_shift = np.einsum('ikmnopqr,ikmn,ikjlmn,ijkl->iokpqr', VVall, 1/SSall, UUall, UvSt_shift)
# FFT UvSt to uvst (with Fourier deshifting)
uvstzJ_shift = np.fft.irfftn(UvStzJ_shift, s=(U,U), axes=(0,2))
uvstzJ = np.fft.fftshift(uvstzJ_shift, axes=(0,2))
# Reshape uvst to xy
xyzJ = uvstzJ.reshape((U*vp, U*vp, z, J))
# xyzJ = np.flip(xyzJ, axis=2) # Kludge for now. I think uvst is accidentally transposed.
return R3S2toR.xyzJ(xyzJ, vox_dims=out_vox_dims, title='Reconstructed object')
# Precompute the pseudoinverse matrix
# Requires self.H_UvStzJ_to_UvSt_det to have been precomputed
def precompute_pinv(self, eta=0):
log.info('Computing SVD')
U, v, S, t, z, J, vp, tp = self.H_UvStzJ_to_UvSt_det.shape
sort = np.moveaxis(self.H_UvStzJ_to_UvSt_det, [0,2,1,3,6,7,4,5], [0,1,2,3,4,5,6,7])
InvOI = sort.reshape((U*S, v*t, vp*tp*z*J))
Inv, O, I = InvOI.shape
K = np.min([I,O])
UU = np.zeros((Inv, K, O), dtype=np.complex64)
SS = np.zeros((Inv, K), dtype=np.float32)
VV = np.zeros((Inv, K, I), dtype=np.complex64)
for i in tqdm(range(Inv)):
uu, ss, vv = np.linalg.svd(InvOI[i,:,:], full_matrices=False)
UU[i,:] = uu
SS[i,:] = ss
VV[i,:] = vv
UUall = UU.reshape((U,S,v,t,v,t))
SSall = SS.reshape((U,S,v,t))
VVall = VV.reshape((U,S,v,t,vp,tp,z,J))
# Reconstruct
log.info('Computing pseduoinverse operator')
SSreg = np.where(SSall > 1e-7, SSall/(SSall**2 + eta), 0) # Regularize
self.Hp = np.einsum('ikmnopqr,ikmn,ikjlmn->ikjlopqr', VVall, SSreg, UUall)
| [
"numpy.moveaxis",
"numpy.arctan2",
"numpy.abs",
"numpy.floor",
"numpy.einsum",
"numpy.product",
"numpy.sin",
"numpy.arange",
"polaris2.geomvis.R2toC2.xy",
"numpy.exp",
"numpy.tile",
"numpy.linalg.svd",
"numpy.fft.rfft2",
"numpy.fft.ifft2",
"numpy.fft.ifftshift",
"numpy.meshgrid",
"po... | [((130, 154), 'logging.getLogger', 'logging.getLogger', (['"""log"""'], {}), "('log')\n", (147, 154), False, 'import logging\n'), ((2123, 2195), 'numpy.einsum', 'np.einsum', (['"""ijkl,l->ijk"""', 'self.h_xyzJ_single_to_xye_bfp', 'dip_orientation'], {}), "('ijkl,l->ijk', self.h_xyzJ_single_to_xye_bfp, dip_orientation)\n", (2132, 2195), True, 'import numpy as np\n'), ((2233, 2473), 'polaris2.geomvis.R2toC2.xy', 'R2toC2.xy', (['out'], {'circle': '(True)', 'title': '"""Scaled back focal plane fields"""', 'xlabel': '"""$\\\\textrm{2NA}/\\\\lambda$"""', 'toplabel': '"""$E_x$"""', 'bottomlabel': '"""$E_y$"""', 'fov': '[self.bfpmin, self.bfpmax]', 'plotfov': '[-self.nuc / 2, self.nuc / 2]', 'colormax': '(1.5)'}), "(out, circle=True, title='Scaled back focal plane fields', xlabel=\n '$\\\\textrm{2NA}/\\\\lambda$', toplabel='$E_x$', bottomlabel='$E_y$', fov=\n [self.bfpmin, self.bfpmax], plotfov=[-self.nuc / 2, self.nuc / 2],\n colormax=1.5)\n", (2242, 2473), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((2740, 2789), 'numpy.linspace', 'np.linspace', (['self.bfpmin', 'self.bfpmax', 'self.npxss'], {}), '(self.bfpmin, self.bfpmax, self.npxss)\n', (2751, 2789), True, 'import numpy as np\n'), ((2802, 2851), 'numpy.linspace', 'np.linspace', (['self.bfpmin', 'self.bfpmax', 'self.npxss'], {}), '(self.bfpmin, self.bfpmax, self.npxss)\n', (2813, 2851), True, 'import numpy as np\n'), ((2874, 2906), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (2885, 2906), True, 'import numpy as np\n'), ((2924, 2946), 'numpy.arctan2', 'np.arctan2', (['tauy', 'taux'], {}), '(tauy, taux)\n', (2934, 2946), True, 'import numpy as np\n'), ((2964, 2994), 'numpy.sqrt', 'np.sqrt', (['(taux ** 2 + tauy ** 2)'], {}), '(taux ** 2 + tauy ** 2)\n', (2971, 2994), True, 'import numpy as np\n'), ((3082, 3119), 'numpy.where', 'np.where', (['(abstau < self.nuc / 2)', '(1)', '(0)'], {}), '(abstau < self.nuc / 2, 1, 0)\n', (3090, 
3119), True, 'import numpy as np\n'), ((3183, 3211), 'numpy.sqrt', 'np.sqrt', (['(1 - apod * rho ** 2)'], {}), '(1 - apod * rho ** 2)\n', (3190, 3211), True, 'import numpy as np\n'), ((3225, 3275), 'numpy.exp', 'np.exp', (['(1.0j * 2 * np.pi * (taux * rx + tauy * ry))'], {}), '(1.0j * 2 * np.pi * (taux * rx + tauy * ry))\n', (3231, 3275), True, 'import numpy as np\n'), ((3467, 3526), 'numpy.zeros', 'np.zeros', (['(self.npxss, self.npxss, 2, 3)'], {'dtype': '"""complex64"""'}), "((self.npxss, self.npxss, 2, 3), dtype='complex64')\n", (3475, 3526), True, 'import numpy as np\n'), ((4110, 4172), 'numpy.einsum', 'np.einsum', (['"""ijkl,ij->ijkl"""', 'self.h_xyzJ_single_to_xye_bfp', 'pre'], {}), "('ijkl,ij->ijkl', self.h_xyzJ_single_to_xye_bfp, pre)\n", (4119, 4172), True, 'import numpy as np\n'), ((5067, 5103), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['to_ft'], {'axes': '(0, 1)'}), '(to_ft, axes=(0, 1))\n', (5083, 5103), True, 'import numpy as np\n'), ((5184, 5219), 'numpy.fft.fftshift', 'np.fft.fftshift', (['ffted'], {'axes': '(0, 1)'}), '(ffted, axes=(0, 1))\n', (5199, 5219), True, 'import numpy as np\n'), ((5783, 5798), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (5796, 5798), False, 'import pdb\n'), ((6041, 6113), 'numpy.einsum', 'np.einsum', (['"""ijkl,l->ijk"""', 'self.h_xyzJ_single_to_xye_det', 'dip_orientation'], {}), "('ijkl,l->ijk', self.h_xyzJ_single_to_xye_det, dip_orientation)\n", (6050, 6113), True, 'import numpy as np\n'), ((6595, 6654), 'numpy.zeros', 'np.zeros', (['(self.npxss, self.npxss, 2, 3)'], {'dtype': '"""complex64"""'}), "((self.npxss, self.npxss, 2, 3), dtype='complex64')\n", (6603, 6654), True, 'import numpy as np\n'), ((7243, 7368), 'polaris2.geomvis.R2toR.xy', 'R2toR.xy', (['irrpx'], {'title': 'self.irrad_title', 'fov': '[-self.fov / 2, self.fov / 2]', 'plotfov': '[-self.plotfov / 2, self.plotfov / 2]'}), '(irrpx, title=self.irrad_title, fov=[-self.fov / 2, self.fov / 2],\n plotfov=[-self.plotfov / 2, self.plotfov / 2])\n', 
(7251, 7368), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((8062, 8130), 'numpy.einsum', 'np.einsum', (['"""ijk,k->ij"""', 'self.h_xyzJ_single_to_xy_det', 'dist.data_J[0]'], {}), "('ijk,k->ij', self.h_xyzJ_single_to_xy_det, dist.data_J[0])\n", (8071, 8130), True, 'import numpy as np\n'), ((8147, 8270), 'polaris2.geomvis.R2toR.xy', 'R2toR.xy', (['out'], {'title': 'self.irrad_title', 'fov': '[-self.fov / 2, self.fov / 2]', 'plotfov': '[-self.plotfov / 2, self.plotfov / 2]'}), '(out, title=self.irrad_title, fov=[-self.fov / 2, self.fov / 2],\n plotfov=[-self.plotfov / 2, self.plotfov / 2])\n', (8155, 8270), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((8479, 8535), 'numpy.zeros', 'np.zeros', (['(self.npx[0], self.npx[1], 6)'], {'dtype': '"""float32"""'}), "((self.npx[0], self.npx[1], 6), dtype='float32')\n", (8487, 8535), True, 'import numpy as np\n'), ((8588, 8614), 'polaris2.geomvis.utilsh.gaunt_l1l1_tol0l2', 'utilsh.gaunt_l1l1_tol0l2', ([], {}), '()\n', (8612, 8614), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((9265, 9283), 'numpy.zeros', 'np.zeros', (['self.npx'], {}), '(self.npx)\n', (9273, 9283), True, 'import numpy as np\n'), ((9465, 9588), 'polaris2.geomvis.R2toR.xy', 'R2toR.xy', (['out'], {'title': 'self.irrad_title', 'fov': '[-self.fov / 2, self.fov / 2]', 'plotfov': '[-self.plotfov / 2, self.plotfov / 2]'}), '(out, title=self.irrad_title, fov=[-self.fov / 2, self.fov / 2],\n plotfov=[-self.plotfov / 2, self.plotfov / 2])\n', (9473, 9588), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((10222, 10262), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['xyzJ.data'], {'axes': '(0, 1)'}), '(xyzJ.data, axes=(0, 1))\n', (10238, 10262), True, 'import numpy as np\n'), ((10283, 10320), 'numpy.fft.rfft2', 'np.fft.rfft2', (['xyzJ_shift'], {'axes': '(0, 1)'}), '(xyzJ_shift, axes=(0, 1))\n', (10295, 10320), True, 'import numpy as np\n'), ((10353, 10410), 
'numpy.einsum', 'np.einsum', (['"""ijkl,ijkl->ij"""', 'self.H_XYzJ_to_XY', 'XYzJ_shift'], {}), "('ijkl,ijkl->ij', self.H_XYzJ_to_XY, XYzJ_shift)\n", (10362, 10410), True, 'import numpy as np\n'), ((10461, 10508), 'numpy.fft.irfft2', 'np.fft.irfft2', (['XY_shift'], {'s': 'xyzJ.data.shape[0:2]'}), '(XY_shift, s=xyzJ.data.shape[0:2])\n', (10474, 10508), True, 'import numpy as np\n'), ((10537, 10562), 'numpy.fft.fftshift', 'np.fft.fftshift', (['xy_shift'], {}), '(xy_shift)\n', (10552, 10562), True, 'import numpy as np\n'), ((11040, 11061), 'numpy.array', 'np.array', (['input_shape'], {}), '(input_shape)\n', (11048, 11061), True, 'import numpy as np\n'), ((11092, 11129), 'numpy.floor', 'np.floor', (['(input_shape_rfft[1] / 2 + 1)'], {}), '(input_shape_rfft[1] / 2 + 1)\n', (11100, 11129), True, 'import numpy as np\n'), ((11222, 11268), 'numpy.zeros', 'np.zeros', (['input_shape_rfft'], {'dtype': 'np.complex64'}), '(input_shape_rfft, dtype=np.complex64)\n', (11230, 11268), True, 'import numpy as np\n'), ((12819, 12891), 'numpy.einsum', 'np.einsum', (['"""ijkl,l->ijk"""', 'self.h_xyzJ_single_to_xye_det', 'dip_orientation'], {}), "('ijkl,l->ijk', self.h_xyzJ_single_to_xye_det, dip_orientation)\n", (12828, 12891), True, 'import numpy as np\n'), ((13362, 13433), 'numpy.zeros', 'np.zeros', (['(self.fourf.npxss, self.fourf.npxss, 2, 3)'], {'dtype': '"""complex64"""'}), "((self.fourf.npxss, self.fourf.npxss, 2, 3), dtype='complex64')\n", (13370, 13433), True, 'import numpy as np\n'), ((15575, 15643), 'numpy.einsum', 'np.einsum', (['"""ijk,k->ij"""', 'self.h_xyzJ_single_to_xy_det', 'dist.data_J[0]'], {}), "('ijk,k->ij', self.h_xyzJ_single_to_xy_det, dist.data_J[0])\n", (15584, 15643), True, 'import numpy as np\n'), ((15660, 15828), 'polaris2.geomvis.R2toR.xy', 'R2toR.xy', (['out'], {'title': '"""Lightfield detector irradiance"""', 'fov': '[-self.fourf.fov / 2, self.fourf.fov / 2]', 'plotfov': '[-self.fourf.plotfov / 2, self.fourf.plotfov / 2]'}), "(out, title='Lightfield detector 
irradiance', fov=[-self.fourf.fov /\n 2, self.fourf.fov / 2], plotfov=[-self.fourf.plotfov / 2, self.fourf.\n plotfov / 2])\n", (15668, 15828), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((16032, 16100), 'numpy.zeros', 'np.zeros', (['(self.fourf.npx[0], self.fourf.npx[1], 6)'], {'dtype': '"""float32"""'}), "((self.fourf.npx[0], self.fourf.npx[1], 6), dtype='float32')\n", (16040, 16100), True, 'import numpy as np\n'), ((16145, 16171), 'polaris2.geomvis.utilsh.gaunt_l1l1_tol0l2', 'utilsh.gaunt_l1l1_tol0l2', ([], {}), '()\n', (16169, 16171), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((16846, 16870), 'numpy.zeros', 'np.zeros', (['self.fourf.npx'], {}), '(self.fourf.npx)\n', (16854, 16870), True, 'import numpy as np\n'), ((17052, 17206), 'polaris2.geomvis.R2toR.xy', 'R2toR.xy', (['out'], {'title': 'self.fourf.irrad_title', 'fov': '[-self.fourf.fov / 2, self.fourf.fov / 2]', 'plotfov': '[-self.fourf.plotfov / 2, self.fourf.plotfov / 2]'}), '(out, title=self.fourf.irrad_title, fov=[-self.fourf.fov / 2, self.\n fourf.fov / 2], plotfov=[-self.fourf.plotfov / 2, self.fourf.plotfov / 2])\n', (17060, 17206), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((17852, 17889), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['uvstzJ'], {'axes': '(0, 2)'}), '(uvstzJ, axes=(0, 2))\n', (17868, 17889), True, 'import numpy as np\n'), ((17912, 17951), 'numpy.fft.rfftn', 'np.fft.rfftn', (['uvstzJ_shift'], {'axes': '(0, 2)'}), '(uvstzJ_shift, axes=(0, 2))\n', (17924, 17951), True, 'import numpy as np\n'), ((18043, 18118), 'numpy.einsum', 'np.einsum', (['"""ijklmnop,iokpmn->ijkl"""', 'self.H_UvStzJ_to_UvSt_det', 'UvStzJ_shift'], {}), "('ijklmnop,iokpmn->ijkl', self.H_UvStzJ_to_UvSt_det, UvStzJ_shift)\n", (18052, 18118), True, 'import numpy as np\n'), ((18194, 18244), 'numpy.fft.irfftn', 'np.fft.irfftn', (['UvSt_shift'], {'s': '(uu, uu)', 'axes': '(0, 2)'}), '(UvSt_shift, s=(uu, uu), axes=(0, 2))\n', (18207, 
18244), True, 'import numpy as np\n'), ((18258, 18298), 'numpy.fft.fftshift', 'np.fft.fftshift', (['uvst_shift'], {'axes': '(0, 2)'}), '(uvst_shift, axes=(0, 2))\n', (18273, 18298), True, 'import numpy as np\n'), ((18408, 18601), 'polaris2.geomvis.R2toR.xy', 'R2toR.xy', (['xy'], {'px_dims': '(2 * (xyzJ.vox_dims[0] * vp / vv,))', 'title': 'self.fourf.irrad_title', 'fov': '[-self.fourf.fov / 2, self.fourf.fov / 2]', 'plotfov': '[-self.fourf.fov / 2, self.fourf.fov / 2]'}), '(xy, px_dims=2 * (xyzJ.vox_dims[0] * vp / vv,), title=self.fourf.\n irrad_title, fov=[-self.fourf.fov / 2, self.fourf.fov / 2], plotfov=[-\n self.fourf.fov / 2, self.fourf.fov / 2])\n', (18416, 18601), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((18950, 18992), 'numpy.zeros', 'np.zeros', (['matrix_shape'], {'dtype': 'np.complex64'}), '(matrix_shape, dtype=np.complex64)\n', (18958, 18992), True, 'import numpy as np\n'), ((19046, 19087), 'tqdm.trange', 'trange', (['input_shape[1]'], {'desc': '"""Outer loop"""'}), "(input_shape[1], desc='Outer loop')\n", (19052, 19087), False, 'from tqdm import trange, tqdm\n'), ((20310, 20345), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['uvst'], {'axes': '(0, 2)'}), '(uvst, axes=(0, 2))\n', (20326, 20345), True, 'import numpy as np\n'), ((20366, 20403), 'numpy.fft.rfftn', 'np.fft.rfftn', (['uvst_shift'], {'axes': '(0, 2)'}), '(uvst_shift, axes=(0, 2))\n', (20378, 20403), True, 'import numpy as np\n'), ((20457, 20512), 'numpy.einsum', 'np.einsum', (['"""ikjlopqr,ijkl->iokpqr"""', 'self.Hp', 'UvSt_shift'], {}), "('ikjlopqr,ijkl->iokpqr', self.Hp, UvSt_shift)\n", (20466, 20512), True, 'import numpy as np\n'), ((20697, 20747), 'numpy.fft.irfftn', 'np.fft.irfftn', (['UvStzJ_shift'], {'s': '(U, U)', 'axes': '(0, 2)'}), '(UvStzJ_shift, s=(U, U), axes=(0, 2))\n', (20710, 20747), True, 'import numpy as np\n'), ((20763, 20805), 'numpy.fft.fftshift', 'np.fft.fftshift', (['uvstzJ_shift'], {'axes': '(0, 2)'}), '(uvstzJ_shift, axes=(0, 2))\n', 
(20778, 20805), True, 'import numpy as np\n'), ((21007, 21078), 'polaris2.geomvis.R3S2toR.xyzJ', 'R3S2toR.xyzJ', (['xyzJ'], {'vox_dims': 'out_vox_dims', 'title': '"""Reconstructed object"""'}), "(xyzJ, vox_dims=out_vox_dims, title='Reconstructed object')\n", (21019, 21078), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((21350, 21445), 'numpy.moveaxis', 'np.moveaxis', (['self.H_UvStzJ_to_UvSt_det', '[0, 2, 1, 3, 6, 7, 4, 5]', '[0, 1, 2, 3, 4, 5, 6, 7]'], {}), '(self.H_UvStzJ_to_UvSt_det, [0, 2, 1, 3, 6, 7, 4, 5], [0, 1, 2, \n 3, 4, 5, 6, 7])\n', (21361, 21445), True, 'import numpy as np\n'), ((21523, 21537), 'numpy.min', 'np.min', (['[I, O]'], {}), '([I, O])\n', (21529, 21537), True, 'import numpy as np\n'), ((21550, 21591), 'numpy.zeros', 'np.zeros', (['(Inv, K, O)'], {'dtype': 'np.complex64'}), '((Inv, K, O), dtype=np.complex64)\n', (21558, 21591), True, 'import numpy as np\n'), ((21605, 21641), 'numpy.zeros', 'np.zeros', (['(Inv, K)'], {'dtype': 'np.float32'}), '((Inv, K), dtype=np.float32)\n', (21613, 21641), True, 'import numpy as np\n'), ((21655, 21696), 'numpy.zeros', 'np.zeros', (['(Inv, K, I)'], {'dtype': 'np.complex64'}), '((Inv, K, I), dtype=np.complex64)\n', (21663, 21696), True, 'import numpy as np\n'), ((22114, 22168), 'numpy.where', 'np.where', (['(SSall > 1e-07)', '(SSall / (SSall ** 2 + eta))', '(0)'], {}), '(SSall > 1e-07, SSall / (SSall ** 2 + eta), 0)\n', (22122, 22168), True, 'import numpy as np\n'), ((22195, 22259), 'numpy.einsum', 'np.einsum', (['"""ikmnopqr,ikmn,ikjlmn->ikjlopqr"""', 'VVall', 'SSreg', 'UUall'], {}), "('ikmnopqr,ikmn,ikjlmn->ikjlopqr', VVall, SSreg, UUall)\n", (22204, 22259), True, 'import numpy as np\n'), ((3775, 3789), 'numpy.cos', 'np.cos', (['tauphi'], {}), '(tauphi)\n', (3781, 3789), True, 'import numpy as np\n'), ((4048, 4062), 'numpy.sin', 'np.sin', (['tauphi'], {}), '(tauphi)\n', (4054, 4062), True, 'import numpy as np\n'), ((4706, 4750), 'numpy.arange', 'np.arange', (['(-self.npxss // 2)', 
'(self.npxss // 2)'], {}), '(-self.npxss // 2, self.npxss // 2)\n', (4715, 4750), True, 'import numpy as np\n'), ((4763, 4807), 'numpy.arange', 'np.arange', (['(-self.npxss // 2)', '(self.npxss // 2)'], {}), '(-self.npxss // 2, self.npxss // 2)\n', (4772, 4807), True, 'import numpy as np\n'), ((4825, 4842), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4836, 4842), True, 'import numpy as np\n'), ((4868, 4914), 'numpy.exp', 'np.exp', (['(-1.0j * np.pi * (xx + yy) / self.npxss)'], {}), '(-1.0j * np.pi * (xx + yy) / self.npxss)\n', (4874, 4914), True, 'import numpy as np\n'), ((4927, 4974), 'numpy.einsum', 'np.einsum', (['"""ijk,ij->ijk"""', 'ebfp.data', 'phase_ramp'], {}), "('ijk,ij->ijk', ebfp.data, phase_ramp)\n", (4936, 4974), True, 'import numpy as np\n'), ((5119, 5152), 'numpy.fft.fft2', 'np.fft.fft2', (['shifted'], {'axes': '(0, 1)'}), '(shifted, axes=(0, 1))\n', (5130, 5152), True, 'import numpy as np\n'), ((6705, 6753), 'polaris2.geomvis.R2toC2.xy', 'R2toC2.xy', (['self.h_xyzJ_single_to_xye_bfp[..., i]'], {}), '(self.h_xyzJ_single_to_xye_bfp[..., i])\n', (6714, 6753), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((9336, 9391), 'polaris2.geomvis.R3S2toR.xyzJ_list', 'R3S2toR.xyzJ_list', (['[dist.data_xyz[m]]', '[dist.data_J[m]]'], {}), '([dist.data_xyz[m]], [dist.data_J[m]])\n', (9353, 9391), False, 'from polaris2.geomvis import R2toR, R2toC2, R3S2toR, utilsh\n'), ((10596, 10607), 'numpy.real', 'np.real', (['xy'], {}), '(xy)\n', (10603, 10607), True, 'import numpy as np\n'), ((11524, 11583), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['self.h_xyzJ_single_to_xy_det'], {'axes': '(0, 1)'}), '(self.h_xyzJ_single_to_xy_det, axes=(0, 1))\n', (11540, 11583), True, 'import numpy as np\n'), ((13708, 13762), 'polaris2.geomvis.R2toC2.xy', 'R2toC2.xy', (['self.fourf.h_xyzJ_single_to_xye_det[..., i]'], {}), '(self.fourf.h_xyzJ_single_to_xye_det[..., i])\n', (13717, 13762), False, 'from polaris2.geomvis import R2toR, R2toC2, 
R3S2toR, utilsh\n'), ((13945, 13997), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(self.ulenpx * self.fourf.ss)'], {}), '(xmin, xmax, self.ulenpx * self.fourf.ss)\n', (13956, 13997), True, 'import numpy as np\n'), ((14017, 14034), 'numpy.meshgrid', 'np.meshgrid', (['x', 'x'], {}), '(x, x)\n', (14028, 14034), True, 'import numpy as np\n'), ((14058, 14134), 'numpy.exp', 'np.exp', (['(-1.0j * np.pi * (xx ** 2 + yy ** 2) / (self.fulen * self.fourf.lamb))'], {}), '(-1.0j * np.pi * (xx ** 2 + yy ** 2) / (self.fulen * self.fourf.lamb))\n', (14064, 14134), True, 'import numpy as np\n'), ((14338, 14396), 'numpy.tile', 'np.tile', (['ulentile', '(2 * (self.fourf.npx[0] // self.ulenpx,))'], {}), '(ulentile, 2 * (self.fourf.npx[0] // self.ulenpx,))\n', (14345, 14396), True, 'import numpy as np\n'), ((14412, 14453), 'numpy.einsum', 'np.einsum', (['"""ijk,ij->ijk"""', 'eim.data', 'tiled'], {}), "('ijk,ij->ijk', eim.data, tiled)\n", (14421, 14453), True, 'import numpy as np\n'), ((14519, 14588), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['self.fourf.npxss', '(self.fourf.wpx_real / self.fourf.ss)'], {}), '(self.fourf.npxss, self.fourf.wpx_real / self.fourf.ss)\n', (14533, 14588), True, 'import numpy as np\n'), ((14612, 14631), 'numpy.meshgrid', 'np.meshgrid', (['nu', 'nu'], {}), '(nu, nu)\n', (14623, 14631), True, 'import numpy as np\n'), ((14648, 14726), 'numpy.exp', 'np.exp', (['(-1.0j * np.pi * self.fourf.lamb * self.fulen * (nuxx ** 2 + nuyy ** 2))'], {}), '(-1.0j * np.pi * self.fourf.lamb * self.fulen * (nuxx ** 2 + nuyy ** 2))\n', (14654, 14726), True, 'import numpy as np\n'), ((14814, 14847), 'numpy.einsum', 'np.einsum', (['"""ijk,ij->ijk"""', 'fft2', 'H'], {}), "('ijk,ij->ijk', fft2, H)\n", (14823, 14847), True, 'import numpy as np\n'), ((16923, 16978), 'polaris2.geomvis.R3S2toR.xyzJ_list', 'R3S2toR.xyzJ_list', (['[dist.data_xyz[m]]', '[dist.data_J[m]]'], {}), '([dist.data_xyz[m]], [dist.data_J[m]])\n', (16940, 16978), False, 'from polaris2.geomvis import R2toR, 
R2toC2, R3S2toR, utilsh\n'), ((19110, 19164), 'tqdm.trange', 'trange', (['input_shape[3]'], {'desc': '"""Inner loop"""', 'leave': '(False)'}), "(input_shape[3], desc='Inner loop', leave=False)\n", (19116, 19164), False, 'from tqdm import trange, tqdm\n'), ((21757, 21807), 'numpy.linalg.svd', 'np.linalg.svd', (['InvOI[i, :, :]'], {'full_matrices': '(False)'}), '(InvOI[i, :, :], full_matrices=False)\n', (21770, 21807), True, 'import numpy as np\n'), ((3302, 3345), 'numpy.sqrt', 'np.sqrt', (['(self.num ** 2 - apod * abstau ** 2)'], {}), '(self.num ** 2 - apod * abstau ** 2)\n', (3309, 3345), True, 'import numpy as np\n'), ((3577, 3591), 'numpy.sin', 'np.sin', (['tauphi'], {}), '(tauphi)\n', (3583, 3591), True, 'import numpy as np\n'), ((3685, 3703), 'numpy.sin', 'np.sin', (['(2 * tauphi)'], {}), '(2 * tauphi)\n', (3691, 3703), True, 'import numpy as np\n'), ((3940, 3954), 'numpy.cos', 'np.cos', (['tauphi'], {}), '(tauphi)\n', (3946, 3954), True, 'import numpy as np\n'), ((7036, 7050), 'numpy.abs', 'np.abs', (['e.data'], {}), '(e.data)\n', (7042, 7050), True, 'import numpy as np\n'), ((11689, 11723), 'numpy.fft.rfft2', 'np.fft.rfft2', (['h_shift'], {'axes': '(0, 1)'}), '(h_shift, axes=(0, 1))\n', (11701, 11723), True, 'import numpy as np\n'), ((11723, 11749), 'numpy.product', 'np.product', (['input_vox_dims'], {}), '(input_vox_dims)\n', (11733, 11749), True, 'import numpy as np\n'), ((14191, 14217), 'numpy.sqrt', 'np.sqrt', (['(xx ** 2 + yy ** 2)'], {}), '(xx ** 2 + yy ** 2)\n', (14198, 14217), True, 'import numpy as np\n'), ((14243, 14268), 'numpy.where', 'np.where', (['(rr < xmax)', '(1)', '(0)'], {}), '(rr < xmax, 1, 0)\n', (14251, 14268), True, 'import numpy as np\n'), ((14744, 14778), 'numpy.fft.fftshift', 'np.fft.fftshift', (['Eout'], {'axes': '(0, 1)'}), '(Eout, axes=(0, 1))\n', (14759, 14778), True, 'import numpy as np\n'), ((14885, 14920), 'numpy.fft.ifft2', 'np.fft.ifft2', (['filtered'], {'axes': '(0, 1)'}), '(filtered, axes=(0, 1))\n', (14897, 14920), True, 
'import numpy as np\n'), ((18864, 18884), 'numpy.floor', 'np.floor', (['(ss / 2 + 1)'], {}), '(ss / 2 + 1)\n', (18872, 18884), True, 'import numpy as np\n'), ((3599, 3613), 'numpy.cos', 'np.cos', (['tauphi'], {}), '(tauphi)\n', (3605, 3613), True, 'import numpy as np\n'), ((3962, 3976), 'numpy.sin', 'np.sin', (['tauphi'], {}), '(tauphi)\n', (3968, 3976), True, 'import numpy as np\n'), ((19672, 19710), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['h_uvstJ'], {'axes': '(0, 2)'}), '(h_uvstJ, axes=(0, 2))\n', (19688, 19710), True, 'import numpy as np\n'), ((19738, 19772), 'numpy.fft.rfft2', 'np.fft.rfft2', (['h_shift'], {'axes': '(0, 2)'}), '(h_shift, axes=(0, 2))\n', (19750, 19772), True, 'import numpy as np\n')] |
# GP regression
import numpy as np
from sklearn.metrics import pairwise_distances
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pickle
import os
def make_K_SE(x, sigma, l):
    """Squared-exponential (RBF) Gram matrix for 1-D inputs.

    Uses the SE kernel definition given in Murphy pg. 521:
        k(x, x') = sigma^2 * exp(-(x - x')^2 / (2 l^2))

    Parameters
    ----------
    x : array-like, shape (N,) -- 1-D input locations
    sigma : float -- signal standard deviation (output scale)
    l : float -- length scale

    Returns
    -------
    (N, N) ndarray Gram matrix.
    """
    col = np.asarray(x, dtype=float).reshape(-1, 1)
    # Pairwise squared distances via broadcasting. This replaces the
    # sklearn pairwise_distances call, which computed a euclidean sqrt
    # only to square it again, and drops the sklearn dependency here.
    sq_dists = (col - col.T)**2
    return sigma**2 * np.exp(-sq_dists / (2.0 * l**2))
def make_Kstar_SE(x, xstar, sigma=1, l=0.5):
    """SE kernel vector k(x_i, x*) between training inputs and a test point.

    Parameters
    ----------
    x : array-like, shape (N,) -- training input locations
    xstar : float -- scalar test location (an array of shape (N,) also
        works elementwise, a generalization over the old tiled version)
    sigma : float -- signal standard deviation
    l : float -- length scale

    Returns
    -------
    (N,) ndarray of kernel evaluations.
    """
    x = np.asarray(x, dtype=float)
    # Broadcasting subtracts the scalar test point directly; the former
    # np.tile created a redundant length-N copy of xstar.
    dists_sq = (x - xstar)**2
    # turn into gram vector
    return sigma**2 * np.exp(-dists_sq / (2.0 * l**2))
def SE_kernel(x1, x2, kernel_params):
    """SE (RBF) kernel matrix between 1-D input sets x1 and x2.

    Bug fix: the original body referenced an undefined module-level ``x``
    and ignored both arguments entirely. It now evaluates
        K[i, j] = sigma^2 * exp(-(x1_i - x2_j)^2 / (2 l^2)),
    the (len(x1), len(x2)) Gram matrix that callers GP_sample and
    GP_predict require (they pass this as kernel_function(x, x, params)
    and take np.diag of the result).

    Parameters
    ----------
    x1, x2 : array-like, shape (N,) and (M,) -- input locations
    kernel_params : dict with keys 'sigma' (output scale) and 'l'
        (length scale)

    Returns
    -------
    (N, M) ndarray Gram matrix.
    """
    sigma = kernel_params['sigma']
    l = kernel_params['l']
    rows = np.asarray(x1, dtype=float).reshape(-1, 1)
    cols = np.asarray(x2, dtype=float).reshape(1, -1)
    return sigma**2 * np.exp(-(rows - cols)**2 / (2.0 * l**2))
def NN_kernel(x1, x2, kernel_params):
    """Equivalent kernel of a single-hidden-layer infinite-width network.

    x1 and x2 may be scalars or 1-D arrays; the result broadcasts x2 down
    the rows and x1 across the columns. kernel_params holds 'sigma_b'
    (bias scale) and 'sigma_w' (weight scale).
    """
    b2 = kernel_params['sigma_b']**2
    w2 = kernel_params['sigma_w']**2
    row = np.array([x1])       # shape (1, len(x1)) after wrapping
    col = np.array([x2]).T     # shape (len(x2), 1)
    # Second-moment terms of the pre-activations.
    k_cross = b2 + w2*row*col
    k_row = b2 + w2*row*row
    k_col = b2 + w2*col*col
    scale = np.sqrt(k_row*k_col)
    angle = np.arccos(k_cross/scale)
    return b2 + (w2/(2*np.pi))*scale*(np.sin(angle) + (np.pi - angle)*np.cos(angle))
def NN_kernel_multidim(x1, x2, kernel_params):
    """Equivalent kernel for a single-layer neural net on multi-D inputs.

    x1 is (N1 x D), x2 is (N2 x D); returns the (N1 x N2) kernel matrix.

    Bug fix (this was the 'MAYBE BUGGY???' version): the old code formed
    full self-Gram matrices K11 (N1 x N1) and K22 (N2 x N2) and multiplied
    them elementwise, which is shape-incompatible unless N1 == N2 and uses
    off-diagonal entries where only the diagonals (self-similarities) are
    meaningful. This version uses the diagonals, matching the working
    column-wise path in NN_kernel_multidim_vector.
    """
    sigma_b = kernel_params['sigma_b']
    sigma_w = kernel_params['sigma_w']
    d_in = x1.shape[1]
    K12 = sigma_b**2 + (sigma_w**2) * x1 @ x2.T / d_in            # (N1, N2)
    # Self-similarities: only the diagonal entries are needed.
    k11 = sigma_b**2 + (sigma_w**2) * np.sum(x1**2, axis=1) / d_in  # (N1,)
    k22 = sigma_b**2 + (sigma_w**2) * np.sum(x2**2, axis=1) / d_in  # (N2,)
    norm = np.sqrt(np.outer(k11, k22))                            # (N1, N2)
    # Clip to the arccos domain to guard against float rounding.
    theta = np.arccos(np.clip(K12 / norm, -1.0, 1.0))
    return sigma_b**2 + (sigma_w**2/(2*np.pi))*norm*(np.sin(theta) + (np.pi - theta)*np.cos(theta))
def NN_kernel_multidim_2(x1, x2, kernel_params):
    """Build the (N1 x N2) NN kernel matrix one column at a time from the
    vector version, as a cross-check on the matrix implementation."""
    n_rows = x1.shape[0]
    n_cols = x2.shape[0]
    gram = np.zeros((n_rows, n_cols))
    # Each row of x2 is one test point; the vector routine fills a column.
    for col, test_point in enumerate(x2):
        gram[:, col] = np.squeeze(
            NN_kernel_multidim_vector(x1, test_point.reshape(1, -1), kernel_params))
    return gram
def NN_kernel_multidim_vector(x, xstar, kernel_params):
    """Equivalent kernel for a single-layer neural net, one test point.

    Parameters
    ----------
    x : (N x D) ndarray -- training inputs, N points in D dimensions
    xstar : (1 x D) ndarray -- the single test point
    kernel_params : dict with 'sigma_b' and 'sigma_w'

    Returns
    -------
    (N, 1) ndarray of kernel evaluations k(x_i, xstar).

    Fix: the original dropped into pdb (import pdb; pdb.set_trace()) when
    float rounding pushed the arccos argument outside [-1, 1] -- a
    debugger trap left in library code. The argument is now clipped to
    the valid domain instead.
    """
    sigma_b = kernel_params['sigma_b']
    sigma_w = kernel_params['sigma_w']
    N = x.shape[0]
    d_in = x.shape[1]
    K12 = sigma_b**2 + (sigma_w**2) * x @ xstar.T / d_in          # (N, 1)
    K11 = sigma_b**2 + (sigma_w**2) * np.sum(x**2, axis=1) / d_in
    K22 = sigma_b**2 + (sigma_w**2) * np.sum(xstar**2, axis=1) / d_in
    K11 = K11.reshape(N, 1)
    K22 = K22[0]
    # Clip to the arccos domain to guard against float rounding.
    cos_theta = np.clip(K12 / np.sqrt(K11 * K22), -1.0, 1.0)
    theta = np.arccos(cos_theta)
    return sigma_b**2 + (sigma_w**2/(2*np.pi))*np.sqrt(K11 * K22)*(np.sin(theta) + np.multiply((np.pi - theta), np.cos(theta)))
def GP_sample(x, kernel_function, kernel_params, sigma_n, num_samples=1): # sample from the prior
K = kernel_function(x, x, kernel_params)
num_points = x.shape[0]
mean = np.zeros(num_points)
cov = K + (sigma_n**2) * np.eye(num_points)
y = np.random.multivariate_normal(mean, cov, num_samples)
return y
def GP_predict(x, y, xstar, kernel_function, kernel_function_vector, kernel_params, sigma_n):
    """GP posterior mean and variance at test points (R&W Algorithm 2.1).

    :param x: training inputs, (N x D).
    :param y: training targets, length N.
    :param xstar: test inputs, (M x D).
    :param kernel_function: callable building a full Gram matrix.
    :param kernel_function_vector: callable building k(x, x*) for one test point.
    :param kernel_params: parameters forwarded to both kernel callables.
    :param sigma_n: observation-noise standard deviation.
    :return: (pred_mean, pred_var), each a length-M array.
    """
    n_train = len(x)
    n_test = len(xstar)
    gram = kernel_function(x, x, kernel_params)
    # Cholesky of the noisy Gram matrix; alpha = K^{-1} y via two triangular solves.
    chol = np.linalg.cholesky(gram + (sigma_n**2) * np.eye(n_train))
    alpha = np.linalg.solve(chol.T, np.linalg.solve(chol, y))
    prior_var = np.diag(kernel_function(xstar, xstar, kernel_params))
    pred_mean = np.zeros(n_test)
    pred_var = np.zeros(n_test)
    # Predictive mean and variance, one test point at a time.
    for idx in range(n_test):
        k_vec = kernel_function_vector(x, xstar[idx].reshape(1, -1), kernel_params)
        k_vec = np.squeeze(k_vec)
        pred_mean[idx] = np.dot(k_vec, alpha)
        v = np.linalg.solve(chol, k_vec)
        pred_var[idx] = prior_var[idx] - np.dot(v, v)
    return pred_mean, pred_var
if __name__ == "__main__":
    # NOTE(review): this script uses `pickle` and `plt` (matplotlib.pyplot),
    # which must be imported at file level above this chunk — confirm.
    # `filename`, `data_location` and `SE_params` are set but unused below.
    filename = './/experiments//NNkernel.pdf'
    data_location = '..//vision//data//1D_COSINE//1d_cosine_separated.pkl'
    # # get dataset
    # with open(data_location, 'rb') as f:
    #     data_load = pickle.load(f)
    # x = data_load[:,0]
    # y = data_load[:,1]
    # hyperparameters
    sigma_n = 0.1 # noise standard deviation
    NN_params = {'sigma_b':1, 'sigma_w':4} # sigma_w is the same as Neal's omega
    SE_params = {'sigma':1, 'l':0.5}
    # generate 2D grid of input points (used for plotting the predictive surface)
    points_per_axis = 40
    x = np.linspace(-2, 2, points_per_axis)
    y = np.linspace(-2, 2, points_per_axis)
    x_grid, y_grid = np.meshgrid(x,y)
    x_grid_flattened = x_grid.reshape(-1,1)
    y_grid_flattened = y_grid.reshape(-1,1)
    # (points_per_axis**2 x 2) array of grid coordinates
    xy_flattened = np.stack((np.squeeze(x_grid_flattened), np.squeeze(y_grid_flattened)), axis=-1)
    # evaluate at two separated clusters to form a dataset
    xy_data_1 = np.random.multivariate_normal([1, 1], 0.01*np.eye(2), 50)
    xy_data_2 = np.random.multivariate_normal([-1, -1], 0.01*np.eye(2), 50)
    xy_data = np.concatenate((xy_data_1, xy_data_2))
    # sample targets from the GP prior at the cluster points, then fit a GP to them
    num_samples = 1
    output_data = GP_sample(xy_data, NN_kernel_multidim_2, NN_params, sigma_n, num_samples=num_samples)
    output_data = np.squeeze(output_data)
    pred_mean, pred_var = GP_predict(xy_data, output_data, xy_flattened, NN_kernel_multidim_2, NN_kernel_multidim_vector, NN_params, sigma_n)
    # build a 1D slice along the main diagonal x2 = x1, parameterised by lambda
    lambdas = np.linspace(-2*np.sqrt(2), 2*np.sqrt(2), 500)
    xlambdas = np.zeros((500, 2))
    for i in range(500):
        # point on the diagonal at signed distance lambdas[i] from the origin
        xlambdas[i, :] = np.array([np.sqrt(2)/2, np.sqrt(2)/2]) * lambdas[i]
    # make predictions along the diagonal
    pred_mean_lambdas, pred_var_lambdas = GP_predict(xy_data, output_data, xlambdas, NN_kernel_multidim_2, NN_kernel_multidim_vector, NN_params, sigma_n)
    # project the xy_data onto lambda values (scalar position along the diagonal)
    unit_vec = [[np.sqrt(2)/2],[np.sqrt(2)/2]]
    projected_xy = xy_data @ unit_vec
    projected_xy = np.squeeze(projected_xy)
    # pickle everything as numpy arrays for posterity
    inputs = xy_data
    outputs = output_data
    pickle_location = 'experiments/prior_sample_2d/data.pkl'
    outfile = open(pickle_location, 'wb')
    pickle.dump(inputs, outfile)
    pickle.dump(outputs, outfile)
    pickle.dump(pred_mean, outfile)
    pickle.dump(pred_var, outfile)
    pickle.dump(lambdas, outfile)
    pickle.dump(xlambdas, outfile)
    pickle.dump(pred_mean_lambdas, outfile)
    pickle.dump(pred_var_lambdas, outfile)
    outfile.close()
    # MEAN PLOT
    # Top panel: 2D contour of the predictive mean; bottom panel: 1D diagonal slice.
    fig, axes = plt.subplots(nrows=2, ncols=1, gridspec_kw={'height_ratios':[3, 1]}, figsize=(6.2, 8))
    cnt = axes[0].contourf(x_grid, y_grid, pred_mean.reshape(points_per_axis, points_per_axis), 200)
    axes[0].set_aspect('equal')
    axes[0].set_xlabel('$x_1$')
    axes[0].set_ylabel('$x_2$')
    axes[0].set_title('$\mathbb{E}[f(\mathbf{x})]$')
    # remove the white contour edges that show up in vector output
    for c in cnt.collections:
        c.set_edgecolor("face")
    axes[0].plot(xy_data[:,0], xy_data[:,1], 'r+')
    # plot diagonal line
    axes[0].plot([0, 1], [0, 1], 'k--', transform=axes[0].transAxes)
    # plot 1D slice with a 2-sigma predictive band
    axes[1].plot(lambdas, pred_mean_lambdas)
    plt.fill_between(lambdas, pred_mean_lambdas + 2 * np.sqrt(pred_var_lambdas),
                     pred_mean_lambdas - 2 * np.sqrt(pred_var_lambdas), color='b', alpha=0.3)
    axes[1].set_xlabel('$\lambda$')
    axes[1].set_ylabel('$f(\mathbf{x}(\lambda))$')
    # plot projected datapoints
    plt.plot(projected_xy, output_data, 'r+')
    # manually place the colorbar axis to span the top panel's height
    bbox_ax_top = axes[0].get_position()
    bbox_ax_bottom = axes[1].get_position()
    cbar_im1a_ax = fig.add_axes([0.9, bbox_ax_top.y0, 0.02, bbox_ax_top.y1-bbox_ax_top.y0])
    cbar_im1a = plt.colorbar(cnt, cax=cbar_im1a_ax)
    plt.savefig('experiments/prior_sample_2d/pred_mean.pdf')
    plt.close()
    # VARIANCE PLOT
    # Same layout as the mean plot, but showing the predictive variance.
    fig, axes = plt.subplots(nrows=2, ncols=1, gridspec_kw={'height_ratios':[3, 1]}, figsize=(6.2, 8))
    cnt = axes[0].contourf(x_grid, y_grid, pred_var.reshape(points_per_axis, points_per_axis), 200)
    axes[0].set_aspect('equal')
    axes[0].set_xlabel('$x_1$')
    axes[0].set_ylabel('$x_2$')
    axes[0].set_title('Var$[f(\mathbf{x})]$')
    for c in cnt.collections:
        c.set_edgecolor("face")
    axes[0].plot(xy_data[:,0], xy_data[:,1], 'r+')
    # plot diagonal line
    axes[0].plot([0, 1], [0, 1], 'k--', transform=axes[0].transAxes)
    # plot 1D slice
    axes[1].plot(lambdas, pred_var_lambdas)
    axes[1].set_xlabel('$\lambda$')
    axes[1].set_ylabel('Var$[f(\mathbf{x}(\lambda))]$')
    axes[1].set_ylim(bottom=0)
    # mark the projected datapoint locations as ticks on the x axis
    axes[1].plot(projected_xy, np.zeros_like(projected_xy), 'r|', markersize=30)
    bbox_ax_top = axes[0].get_position()
    bbox_ax_bottom = axes[1].get_position()
    cbar_im1a_ax = fig.add_axes([0.9, bbox_ax_top.y0, 0.02, bbox_ax_top.y1-bbox_ax_top.y0])
    cbar_im1a = plt.colorbar(cnt, cax=cbar_im1a_ax)
    plt.savefig('experiments/prior_sample_2d/pred_var.pdf')
    plt.close()
    # SD PLOT
    # Same layout again, showing the predictive standard deviation.
    fig, axes = plt.subplots(nrows=2, ncols=1, gridspec_kw={'height_ratios':[3, 1]}, figsize=(6.2, 8))
    cnt = axes[0].contourf(x_grid, y_grid, np.sqrt(pred_var).reshape(points_per_axis, points_per_axis), 200)
    axes[0].set_aspect('equal')
    axes[0].set_xlabel('$x_1$')
    axes[0].set_ylabel('$x_2$')
    axes[0].set_title('$\sigma[f(\mathbf{x})]$')
    for c in cnt.collections:
        c.set_edgecolor("face")
    axes[0].plot(xy_data[:,0], xy_data[:,1], 'r+')
    # plot diagonal line
    axes[0].plot([0, 1], [0, 1], 'k--', transform=axes[0].transAxes)
    # plot 1D slice
    axes[1].plot(lambdas, np.sqrt(pred_var_lambdas))
    axes[1].set_xlabel('$\lambda$')
    axes[1].set_ylabel('$\sigma[f(\mathbf{x}(\lambda))]$')
    axes[1].set_ylim(bottom=0)
    # mark the projected datapoint locations as ticks on the x axis
    axes[1].plot(projected_xy, np.zeros_like(projected_xy), 'r|', markersize=30)
    bbox_ax_top = axes[0].get_position()
    bbox_ax_bottom = axes[1].get_position()
    cbar_im1a_ax = fig.add_axes([0.9, bbox_ax_top.y0, 0.02, bbox_ax_top.y1-bbox_ax_top.y0])
    cbar_im1a = plt.colorbar(cnt, cax=cbar_im1a_ax)
    plt.savefig('experiments/prior_sample_2d/pred_sd.pdf')
    plt.close()
# plt.figure(2)
# pred_var = pred_var.reshape(points_per_axis, points_per_axis)
# cnt = plt.contourf(x_grid, y_grid, pred_var, 40)
# plt.scatter(xy_data[:,0], xy_data[:,1])
# plt.colorbar()
# plt.xlabel('x')
# plt.ylabel('y')
# plt.title('predictive variance')
# for c in cnt.collections:
# c.set_edgecolor("face")
# plt.savefig('experiments/prior_sample_2d/pred_var.pdf')
# plt.figure(3)
# pred_sd = np.sqrt(pred_var)
# cnt = plt.contourf(x_grid, y_grid, pred_sd, 40)
# plt.scatter(xy_data[:,0], xy_data[:,1])
# plt.colorbar()
# plt.xlabel('x')
# plt.ylabel('y')
# plt.title('predictive standard deviation')
# for c in cnt.collections:
# c.set_edgecolor("face")
# plt.savefig('experiments/prior_sample_2d/pred_sd.pdf')
# # sample from the prior
# num_samples = 1
# output_flattened = GP_sample(xy_flattened, NN_kernel_multidim, NN_params, sigma_n, num_samples=num_samples)
# # plot things in 2D
# output = output_flattened.reshape(points_per_axis, points_per_axis)
# cnt = plt.contourf(x_grid, y_grid, output, 40)
# plt.colorbar()
# plt.xlabel('x')
# plt.ylabel('y')
# # plt.show()
# for c in cnt.collections:
# c.set_edgecolor("face")
# plt.savefig('experiments/prior_sample_2d/NN_kernel.pdf')
# x_star = np.linspace(-2, 2, num=1000)
# # evaluate at two separated clusters to form a dataset
# x_data_1 = np.random.multivariate_normal([1], [[0.01]], 50)
# x_data_2 = np.random.multivariate_normal([-1], [[0.01]], 50)
# x_data = np.concatenate((x_data_1, x_data_2))
# x_data = np.squeeze(x_data)
# # sample from the prior
# num_samples = 1
# y_data = GP_sample(x_data, NN_kernel, NN_params, sigma_n, num_samples=num_samples)
# y_data = np.squeeze(y_data)
# pred_mean, pred_var = GP_predict(x_data, y_data, x_star, NN_kernel, NN_params, sigma_n)
# # plot GP fit
# plt.figure(1)
# plt.xlabel('$x$')
# plt.ylabel('$y$')
# plt.xlim([-2, 2])
# plt.plot(x_data, y_data, 'k+')
# plt.plot(x_star, pred_mean, color='b')
# plt.fill_between(x_star, pred_mean + 2 * np.sqrt(pred_var),
# pred_mean - 2 * np.sqrt(pred_var), color='b', alpha=0.3)
# plt.savefig('experiments/prior_sample_2d/predictive.pdf')
# # plot variance
# plt.figure(2)
# plt.xlabel('$x$')
# plt.ylabel('$Var[f(x)]$')
# plt.xlim([-2, 2])
# plt.plot(x_data, np.zeros_like(x_data), 'k|', markersize = 30)
# plt.plot(x_star, pred_var, color='b')
# plt.gca().set_ylim(bottom=0)
# plt.savefig('experiments/prior_sample_2d/variance.pdf')
# # plot standard deviation
# plt.figure(3)
# plt.xlabel('$x$')
# plt.ylabel('$\sigma[f(x)]$')
# plt.xlim([-2, 2])
# plt.plot(x_data, np.zeros_like(x_data), 'k|', markersize = 30)
# plt.plot(x_star, np.sqrt(pred_var), color='b')
# plt.gca().set_ylim(bottom=0)
# plt.savefig('experiments/prior_sample_2d/standard_deviation.pdf')
# # pickle the datapoints
# pickle_location = 'experiments/prior_sample_2d/data.pkl'
# outfile = open(pickle_location, 'wb')
# pickle.dump(x_data, outfile)
# pickle.dump(y_data, outfile)
# pickle.dump(pred_mean, outfile)
# pickle.dump(pred_var, outfile)
# outfile.close()
# # plot GP fit
# plt.figure(1)
# plt.xlabel('$x$')
# plt.ylabel('$y$')
# # plt.ylim([-2, 2])
# plt.xlim([-2, 2])
# for i in range(num_samples):
# plt.plot(x, y[i,:])
# plt.savefig('experiments/prior_sample/NN_kernel.pdf')
##############
# xstar = np.linspace(-3, 3, num=1000)
# K = make_K_SE(x, sigma, l)
# # Algorithm 2.1 in Rasmussen and Williams
# L = np.linalg.cholesky(K + (sigma_n**2)*np.eye(len(x)))
# alpha = np.linalg.solve(L.T, (np.linalg.solve(L, y)))
# pred_mean = np.zeros(len(xstar))
# pred_var = np.zeros(len(xstar))
# # predictive mean and variance at a test point
# for i in range(len(xstar)):
# kstar = make_Kstar_SE(x, xstar[i], sigma, l)
# pred_mean[i] = np.dot(kstar, alpha)
# v = np.linalg.solve(L, kstar)
# pred_var[i] = SE_kernel(xstar[i], xstar[i], sigma, l) - np.dot(v,v)
#############
# plot prior draws
# no_samp = 10
# x_in = np.linspace(-20, 20, 1000)
# cov = NN_kernel(x_in, x_in, NN_params) + 1e-6 * np.eye(1000)
# L = np.linalg.cholesky(cov)
# sample = L @ np.random.randn(x_in.shape[0], no_samp)
# # plot sample
# for i in range(no_samp):
# plt.plot(x_in, sample[:,i])
# plt.show()
# # plot GP fit
# plt.figure(1)
# plt.xlabel('$x$')
# plt.ylabel('$y$')
# plt.ylim([-3, 3])
# plt.xlim([-3, 3])
# plt.plot(x, y, 'k+')
# plt.plot(xstar, pred_mean, color='b')
# plt.fill_between(xstar, pred_mean + np.sqrt(pred_var),
# pred_mean - np.sqrt(pred_var), color='b', alpha=0.3)
# plt.savefig('experiments/test_bound/predictive.pdf')
# # plot just the variance
# plt.figure(2)
# plt.xlabel('$x$')
# plt.ylabel('$Var[f(x)]$')
# plt.ylim([0, 1.2])
# plt.xlim([-2, 2])
# plt.plot(x, np.zeros_like(x), 'k|', markersize=30)
# plt.plot(xstar, pred_var, color='b')
# plt.savefig('experiments/test_bound/variance.pdf')
# # plot just the S.D.
# plt.figure(3)
# plt.xlabel('$x$')
# plt.ylabel('$\sigma[f(x)]$')
# plt.ylim([0, 1.2])
# plt.xlim([-2, 2])
# plt.plot(x, np.zeros_like(x), 'k|', markersize=30)
# plt.plot(xstar, np.sqrt(pred_var), color='b')
# plt.savefig('experiments/test_bound/sd.pdf')
# plt.show()
# plt.savefig(filename)
# pickle everything as numpy arrays for posterity
# inputs = xstar
# mean = pred_mean
# sd = np.sqrt(pred_var + sigma_n**2)
# pickle_location = os.path.join('experiments', 'plot_GP')
# outfile = open(pickle_location, 'wb')
# pickle.dump(inputs, outfile)
# pickle.dump(mean, outfile)
# pickle.dump(sd, outfile)
# outfile.close()
| [
"pickle.dump",
"numpy.sum",
"numpy.sin",
"numpy.tile",
"numpy.exp",
"numpy.linalg.solve",
"numpy.meshgrid",
"numpy.zeros_like",
"numpy.multiply",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.squeeze",
"nump... | [((547, 564), 'numpy.tile', 'np.tile', (['xstar', 'N'], {}), '(xstar, N)\n', (554, 564), True, 'import numpy as np\n'), ((1399, 1413), 'numpy.array', 'np.array', (['[x1]'], {}), '([x1])\n', (1407, 1413), True, 'import numpy as np\n'), ((1423, 1437), 'numpy.array', 'np.array', (['[x2]'], {}), '([x2])\n', (1431, 1437), True, 'import numpy as np\n'), ((2647, 2673), 'numpy.zeros', 'np.zeros', (['(len_x1, len_x2)'], {}), '((len_x1, len_x2))\n', (2655, 2673), True, 'import numpy as np\n'), ((3963, 3983), 'numpy.zeros', 'np.zeros', (['num_points'], {}), '(num_points)\n', (3971, 3983), True, 'import numpy as np\n'), ((4041, 4094), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean', 'cov', 'num_samples'], {}), '(mean, cov, num_samples)\n', (4070, 4094), True, 'import numpy as np\n'), ((5592, 5627), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'points_per_axis'], {}), '(-2, 2, points_per_axis)\n', (5603, 5627), True, 'import numpy as np\n'), ((5636, 5671), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'points_per_axis'], {}), '(-2, 2, points_per_axis)\n', (5647, 5671), True, 'import numpy as np\n'), ((5693, 5710), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5704, 5710), True, 'import numpy as np\n'), ((6121, 6159), 'numpy.concatenate', 'np.concatenate', (['(xy_data_1, xy_data_2)'], {}), '((xy_data_1, xy_data_2))\n', (6135, 6159), True, 'import numpy as np\n'), ((6335, 6358), 'numpy.squeeze', 'np.squeeze', (['output_data'], {}), '(output_data)\n', (6345, 6358), True, 'import numpy as np\n'), ((6621, 6639), 'numpy.zeros', 'np.zeros', (['(500, 2)'], {}), '((500, 2))\n', (6629, 6639), True, 'import numpy as np\n'), ((7088, 7112), 'numpy.squeeze', 'np.squeeze', (['projected_xy'], {}), '(projected_xy)\n', (7098, 7112), True, 'import numpy as np\n'), ((7323, 7351), 'pickle.dump', 'pickle.dump', (['inputs', 'outfile'], {}), '(inputs, outfile)\n', (7334, 7351), False, 'import pickle\n'), ((7356, 7385), 'pickle.dump', 
'pickle.dump', (['outputs', 'outfile'], {}), '(outputs, outfile)\n', (7367, 7385), False, 'import pickle\n'), ((7390, 7421), 'pickle.dump', 'pickle.dump', (['pred_mean', 'outfile'], {}), '(pred_mean, outfile)\n', (7401, 7421), False, 'import pickle\n'), ((7426, 7456), 'pickle.dump', 'pickle.dump', (['pred_var', 'outfile'], {}), '(pred_var, outfile)\n', (7437, 7456), False, 'import pickle\n'), ((7461, 7490), 'pickle.dump', 'pickle.dump', (['lambdas', 'outfile'], {}), '(lambdas, outfile)\n', (7472, 7490), False, 'import pickle\n'), ((7495, 7525), 'pickle.dump', 'pickle.dump', (['xlambdas', 'outfile'], {}), '(xlambdas, outfile)\n', (7506, 7525), False, 'import pickle\n'), ((7530, 7569), 'pickle.dump', 'pickle.dump', (['pred_mean_lambdas', 'outfile'], {}), '(pred_mean_lambdas, outfile)\n', (7541, 7569), False, 'import pickle\n'), ((7574, 7612), 'pickle.dump', 'pickle.dump', (['pred_var_lambdas', 'outfile'], {}), '(pred_var_lambdas, outfile)\n', (7585, 7612), False, 'import pickle\n'), ((7718, 7809), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)', 'gridspec_kw': "{'height_ratios': [3, 1]}", 'figsize': '(6.2, 8)'}), "(nrows=2, ncols=1, gridspec_kw={'height_ratios': [3, 1]},\n figsize=(6.2, 8))\n", (7730, 7809), True, 'import matplotlib.pyplot as plt\n'), ((8618, 8659), 'matplotlib.pyplot.plot', 'plt.plot', (['projected_xy', 'output_data', '"""r+"""'], {}), "(projected_xy, output_data, 'r+')\n", (8626, 8659), True, 'import matplotlib.pyplot as plt\n'), ((8855, 8890), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cnt'], {'cax': 'cbar_im1a_ax'}), '(cnt, cax=cbar_im1a_ax)\n', (8867, 8890), True, 'import matplotlib.pyplot as plt\n'), ((8896, 8952), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""experiments/prior_sample_2d/pred_mean.pdf"""'], {}), "('experiments/prior_sample_2d/pred_mean.pdf')\n", (8907, 8952), True, 'import matplotlib.pyplot as plt\n'), ((8957, 8968), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8966, 
8968), True, 'import matplotlib.pyplot as plt\n'), ((9057, 9148), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)', 'gridspec_kw': "{'height_ratios': [3, 1]}", 'figsize': '(6.2, 8)'}), "(nrows=2, ncols=1, gridspec_kw={'height_ratios': [3, 1]},\n figsize=(6.2, 8))\n", (9069, 9148), True, 'import matplotlib.pyplot as plt\n'), ((10089, 10124), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cnt'], {'cax': 'cbar_im1a_ax'}), '(cnt, cax=cbar_im1a_ax)\n', (10101, 10124), True, 'import matplotlib.pyplot as plt\n'), ((10130, 10185), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""experiments/prior_sample_2d/pred_var.pdf"""'], {}), "('experiments/prior_sample_2d/pred_var.pdf')\n", (10141, 10185), True, 'import matplotlib.pyplot as plt\n'), ((10190, 10201), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10199, 10201), True, 'import matplotlib.pyplot as plt\n'), ((10284, 10375), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)', 'gridspec_kw': "{'height_ratios': [3, 1]}", 'figsize': '(6.2, 8)'}), "(nrows=2, ncols=1, gridspec_kw={'height_ratios': [3, 1]},\n figsize=(6.2, 8))\n", (10296, 10375), True, 'import matplotlib.pyplot as plt\n'), ((11340, 11375), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['cnt'], {'cax': 'cbar_im1a_ax'}), '(cnt, cax=cbar_im1a_ax)\n', (11352, 11375), True, 'import matplotlib.pyplot as plt\n'), ((11381, 11435), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""experiments/prior_sample_2d/pred_sd.pdf"""'], {}), "('experiments/prior_sample_2d/pred_sd.pdf')\n", (11392, 11435), True, 'import matplotlib.pyplot as plt\n'), ((11440, 11451), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11449, 11451), True, 'import matplotlib.pyplot as plt\n'), ((416, 426), 'numpy.exp', 'np.exp', (['km'], {}), '(km)\n', (422, 426), True, 'import numpy as np\n'), ((676, 685), 'numpy.exp', 'np.exp', (['v'], {}), '(v)\n', (682, 685), True, 'import numpy as np\n'), ((1006, 1016), 
'numpy.exp', 'np.exp', (['km'], {}), '(km)\n', (1012, 1016), True, 'import numpy as np\n'), ((3509, 3524), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3522, 3524), False, 'import pdb\n'), ((3588, 3603), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (3601, 3603), False, 'import pdb\n'), ((4389, 4410), 'numpy.linalg.solve', 'np.linalg.solve', (['L', 'y'], {}), '(L, y)\n', (4404, 4410), True, 'import numpy as np\n'), ((4807, 4824), 'numpy.squeeze', 'np.squeeze', (['kstar'], {}), '(kstar)\n', (4817, 4824), True, 'import numpy as np\n'), ((4848, 4868), 'numpy.dot', 'np.dot', (['kstar', 'alpha'], {}), '(kstar, alpha)\n', (4854, 4868), True, 'import numpy as np\n'), ((4881, 4906), 'numpy.linalg.solve', 'np.linalg.solve', (['L', 'kstar'], {}), '(L, kstar)\n', (4896, 4906), True, 'import numpy as np\n'), ((9844, 9871), 'numpy.zeros_like', 'np.zeros_like', (['projected_xy'], {}), '(projected_xy)\n', (9857, 9871), True, 'import numpy as np\n'), ((10879, 10904), 'numpy.sqrt', 'np.sqrt', (['pred_var_lambdas'], {}), '(pred_var_lambdas)\n', (10886, 10904), True, 'import numpy as np\n'), ((11095, 11122), 'numpy.zeros_like', 'np.zeros_like', (['projected_xy'], {}), '(projected_xy)\n', (11108, 11122), True, 'import numpy as np\n'), ((3631, 3649), 'numpy.sqrt', 'np.sqrt', (['(K11 * K22)'], {}), '(K11 * K22)\n', (3638, 3649), True, 'import numpy as np\n'), ((4013, 4031), 'numpy.eye', 'np.eye', (['num_points'], {}), '(num_points)\n', (4019, 4031), True, 'import numpy as np\n'), ((4984, 4996), 'numpy.dot', 'np.dot', (['v', 'v'], {}), '(v, v)\n', (4990, 4996), True, 'import numpy as np\n'), ((5827, 5855), 'numpy.squeeze', 'np.squeeze', (['x_grid_flattened'], {}), '(x_grid_flattened)\n', (5837, 5855), True, 'import numpy as np\n'), ((5857, 5885), 'numpy.squeeze', 'np.squeeze', (['y_grid_flattened'], {}), '(y_grid_flattened)\n', (5867, 5885), True, 'import numpy as np\n'), ((6016, 6025), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6022, 6025), True, 'import numpy as 
np\n'), ((6092, 6101), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (6098, 6101), True, 'import numpy as np\n'), ((6575, 6585), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6582, 6585), True, 'import numpy as np\n'), ((6589, 6599), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6596, 6599), True, 'import numpy as np\n'), ((1613, 1634), 'numpy.multiply', 'np.multiply', (['K11', 'K22'], {}), '(K11, K22)\n', (1624, 1634), True, 'import numpy as np\n'), ((1717, 1730), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1723, 1730), True, 'import numpy as np\n'), ((2290, 2311), 'numpy.multiply', 'np.multiply', (['K11', 'K22'], {}), '(K11, K22)\n', (2301, 2311), True, 'import numpy as np\n'), ((2394, 2407), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2400, 2407), True, 'import numpy as np\n'), ((3300, 3322), 'numpy.sum', 'np.sum', (['(x ** 2)'], {'axis': '(1)'}), '(x ** 2, axis=1)\n', (3306, 3322), True, 'import numpy as np\n'), ((3366, 3392), 'numpy.sum', 'np.sum', (['(xstar ** 2)'], {'axis': '(1)'}), '(xstar ** 2, axis=1)\n', (3372, 3392), True, 'import numpy as np\n'), ((3463, 3481), 'numpy.sqrt', 'np.sqrt', (['(K11 * K22)'], {}), '(K11 * K22)\n', (3470, 3481), True, 'import numpy as np\n'), ((3543, 3561), 'numpy.sqrt', 'np.sqrt', (['(K11 * K22)'], {}), '(K11 * K22)\n', (3550, 3561), True, 'import numpy as np\n'), ((3699, 3717), 'numpy.sqrt', 'np.sqrt', (['(K11 * K22)'], {}), '(K11 * K22)\n', (3706, 3717), True, 'import numpy as np\n'), ((3719, 3732), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3725, 3732), True, 'import numpy as np\n'), ((7001, 7011), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7008, 7011), True, 'import numpy as np\n'), ((7016, 7026), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7023, 7026), True, 'import numpy as np\n'), ((8382, 8407), 'numpy.sqrt', 'np.sqrt', (['pred_var_lambdas'], {}), '(pred_var_lambdas)\n', (8389, 8407), True, 'import numpy as np\n'), ((8446, 8471), 'numpy.sqrt', 'np.sqrt', 
(['pred_var_lambdas'], {}), '(pred_var_lambdas)\n', (8453, 8471), True, 'import numpy as np\n'), ((10414, 10431), 'numpy.sqrt', 'np.sqrt', (['pred_var'], {}), '(pred_var)\n', (10421, 10431), True, 'import numpy as np\n'), ((1693, 1714), 'numpy.multiply', 'np.multiply', (['K11', 'K22'], {}), '(K11, K22)\n', (1704, 1714), True, 'import numpy as np\n'), ((1762, 1775), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1768, 1775), True, 'import numpy as np\n'), ((2370, 2391), 'numpy.multiply', 'np.multiply', (['K11', 'K22'], {}), '(K11, K22)\n', (2381, 2391), True, 'import numpy as np\n'), ((2439, 2452), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2445, 2452), True, 'import numpy as np\n'), ((3764, 3777), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3770, 3777), True, 'import numpy as np\n'), ((6700, 6710), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6707, 6710), True, 'import numpy as np\n'), ((6714, 6724), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6721, 6724), True, 'import numpy as np\n')] |
from _commons import warn, error, create_dir_path
import numpy as np
import time
from movielens import MovieLens
import random
#'binary_unknown'
class LinUserbase:
    """User-based neighborhood variant of LinUCB for movie recommendation.

    For a target user, scores for every item are computed for each of the
    top-k most similar users (cosine similarity on the rating matrix) and
    then averaged with similarity weights; the best-scoring item is
    recommended and the corresponding LinUCB arm statistics are updated.
    """

    def __init__(self, alpha, dataset=None, max_items=500, allow_selecting_known_arms=True, fixed_rewards=True,
                 prob_reward_p=0.9):
        # alpha: LinUCB exploration coefficient.
        # dataset: MovieLens-like dataset; a default ml-100k instance is built if None.
        # max_items: dataset is shrunk to at most this many items.
        # allow_selecting_known_arms: if True, already-rated items may be recommended again.
        # fixed_rewards / prob_reward_p: reward model parameters forwarded to dataset.recommend().
        if dataset is None:
            self.dataset = MovieLens(variant='ml-100k',
                                     pos_rating_threshold=4,
                                     data_augmentation_mode='binary_unknown')
        else:
            self.dataset = dataset
        self.dataset.shrink(max_items)
        self.dataset.add_random_ratings(num_to_each_user=3)
        self.alpha = alpha
        self.fixed_rewards = fixed_rewards
        self.prob_reward_p = prob_reward_p
        self.users_with_unrated_items = np.array(range(self.dataset.num_users))
        self.monitored_user = np.random.choice(self.users_with_unrated_items)
        self.allow_selecting_known_arms = allow_selecting_known_arms
        self.d = self.dataset.arm_feature_dim
        # b[a]: LinUCB reward-weighted feature sum for arm a.
        self.b = np.zeros(shape=(self.dataset.num_items, self.d))
        # More efficient way to create array of identity matrices of length num_items
        print("\nInitializing matrix A of shape {} which will require {}MB of memory."
              .format((self.dataset.num_items, self.d, self.d), 8 * self.dataset.num_items * self.d * self.d / 1e6))
        # A has three axes: one (d x d) identity matrix per recommendable item.
        self.A = np.repeat(np.identity(self.d, dtype=float)[np.newaxis, :, :], self.dataset.num_items, axis=0)
        print("\nLinUCB successfully initialized.")

    def find_top_k_similarity(self, t, k = 2):
        """
        Find the k users most similar to user t by cosine similarity on the
        shrunk rating matrix.
        :param t: User_id of the target user.
        :param k: Number of neighbors to return (also stored on self.k).
        :return: (user_top_k, sim_top_k) — neighbor user ids (t itself included
                 with similarity 1) and their similarity values. Only users with
                 strictly positive similarity are kept, so fewer than k ids may
                 be returned.
        """
        R = self.dataset.orig_R_shrink
        user_num = R.shape[0]
        sim = np.zeros(user_num)
        self.k= k
        for user_id in range(user_num):
            if user_id != t:
                simlarity = np.dot(R[t], R[user_id]) / (np.linalg.norm(R[t]) * np.linalg.norm(R[user_id]))
                # NaN (zero-norm rows) or numerically > 1 values are marked invalid.
                if np.isnan(simlarity) | (simlarity > 1):
                    sim[user_id] = -1
                else:
                    sim[user_id] = simlarity
            else:
                sim[user_id] = 1
        user_pos_index = np.where(sim > 0)
        user_top_k = np.argsort(sim)[-k:]
        # intersect the top-k indices with the positive-similarity indices
        user_top_k = np.intersect1d(user_pos_index, user_top_k)
        sim_top_k = sim[user_top_k]
        return user_top_k, sim_top_k

    def choose_arm(self, t, unknown_item_ids, verbosity):
        """
        Choose an arm to pull = item to recommend to user t that he did not rate yet.
        :param t: User_id of user to recommend to.
        :param unknown_item_ids: Indexes of items that user t has not rated yet.
        :return: Received reward for selected item = 1/0 = user liked/disliked item.
        """
        A = self.A
        b = self.b
        top_k_user, top_k_user_sim = self.find_top_k_similarity(t=t)
        # Normalize neighbor similarities into weights that sum to 1.
        top_k_user_weight = top_k_user_sim / np.sum(top_k_user_sim)
        top_k_user_weight = top_k_user_weight.reshape(-1,1) # turn the row vector into a column vector
        count = 0;
        #item_ids = unknown_item_ids
        item_ids = range(self.dataset.num_items)
        # P_t: one row of per-item scores for each neighbor.
        P_t = np.zeros(shape=(len(top_k_user), self.dataset.num_items))
        for neighbor in top_k_user:
            arm_features = self.dataset.get_features_of_current_arms(t=neighbor)
            # arm_features is a matrix built by concatenating the neighbor's
            # historical ratings with each item's genre features.
            p_t = np.zeros(shape=(arm_features.shape[0],), dtype=float)
            p_t -= 9999 # I never want to select the already rated items
            if self.allow_selecting_known_arms:
                item_ids = range(self.dataset.num_items)
                p_t += 9999
            for a in item_ids: # iterate over all arms
                '''
                x_ta = arm_features[a].reshape(arm_features[a].shape[0], 1) # make a column vector
                A_a_inv = np.linalg.inv(A[a])
                theta_a = A_a_inv.dot(b[a])
                p_t[a] = theta_a.T.dot(x_ta) + self.alpha * np.sqrt(x_ta.T.dot(A_a_inv).dot(x_ta))
                '''
                #if a in self.dataset.get_uknown_items_of_user(neighbor):
                    #print(self.dataset.get_uknown_items_of_user(neighbor))
                    #print(unknown_item_ids)
                    #print('--------------')
                if a in self.dataset.get_uknown_items_of_user(neighbor):
                    # Unrated by the neighbor: score with the LinUCB upper
                    # confidence bound. Row a of arm_features is the neighbor's
                    # rating history concatenated with this movie's genre.
                    x_ta = arm_features[a].reshape(arm_features[a].shape[0], 1) # make a column vector
                    A_a_inv = np.linalg.inv(A[a])
                    theta_a = A_a_inv.dot(b[a])
                    p_t[a] = theta_a.T.dot(x_ta) + self.alpha * np.sqrt(x_ta.T.dot(A_a_inv).dot(x_ta))
                else:
                    # Already rated by the neighbor: use the observed reward directly.
                    p_t[a] = self.dataset.recommend(user_id=neighbor, item_id=a,
                                                    fixed_rewards=self.fixed_rewards, prob_reward_p=self.prob_reward_p)
            P_t[count] = p_t
            count += 1
        # P_t is (k, num_items): a row per nearest neighbor, a column per item.
        # top_k_user_weight is (k, 1); transposing P_t to (num_items, k) and
        # multiplying gives the similarity-weighted mean score per item.
        P_t = P_t.T
        p_t = np.matmul(P_t,top_k_user_weight).flatten()# flatten into a length-num_items vector
        p_t_unknown = p_t[unknown_item_ids];  # NOTE(review): assigned but never used
        max_p_t = np.max(p_t)
        if max_p_t <= 0:
            print("User {} has max p_t={}, p_t={}".format(t, max_p_t, p_t))
        # I want to randomly break ties, np.argmax return the first occurence of maximum.
        # So I will get all occurences of the max and randomly select between them
        max_idxs = np.argwhere(p_t == max_p_t).flatten()
        a_t = np.random.choice(max_idxs) # idx of article to recommend to user t
        # observed reward = 1/0
        r_t = self.dataset.recommend(user_id=t, item_id=a_t,
                                     fixed_rewards=self.fixed_rewards, prob_reward_p=self.prob_reward_p)
        if verbosity >= 2:
            print("User {} choosing item {} with p_t={} reward {}".format(t, a_t, p_t[a_t], r_t))
        # NOTE(review): arm_features here is left over from the LAST neighbor of
        # the loop above, so the LinUCB update uses that neighbor's features
        # rather than user t's — confirm this is intended.
        x_t_at = arm_features[a_t].reshape(arm_features[a_t].shape[0], 1) # make a column vector
        # Each arm keeps its own A matrix; only the recommended arm's statistics are updated.
        A[a_t] = A[a_t] + x_t_at.dot(x_t_at.T)
        b[a_t] = b[a_t] + r_t * x_t_at.flatten() # turn it back into an array because b[a_t] is an array
        return r_t

    # One epoch = recommending once to every user in the pool.
    def run_epoch(self, verbosity=2):
        """
        Call choose_arm() for each user in the dataset.
        :return: (Average received reward, total elapsed seconds).
        """
        rewards = []
        start_time = time.time()
        for i in range(self.dataset.num_users):
            start_time_i = time.time()
            user_id = self.dataset.get_next_user()
            #print(user_id)
            # user_id = 1
            unknown_item_ids = self.dataset.get_uknown_items_of_user(user_id)
            if len(unknown_item_ids) == 0:
                continue;
            if self.allow_selecting_known_arms == False:
                if user_id not in self.users_with_unrated_items:
                    continue
                if unknown_item_ids.size == 0:
                    # Remove users with no remaining unknown items from the pool.
                    print("User {} has no more unknown ratings, skipping him.".format(user_id))
                    self.users_with_unrated_items = self.users_with_unrated_items[
                        self.users_with_unrated_items != user_id]
                    continue
            rewards.append(self.choose_arm(user_id, unknown_item_ids, verbosity))
            time_i = time.time() - start_time_i
            if verbosity >= 2:
                # NOTE(review): rewards[i] indexes by loop counter i, but after any
                # `continue` above, len(rewards) < i + 1, so this can misreport or
                # raise IndexError — verify.
                print("Choosing arm for user {}/{} ended with reward {} in {}s".format(i, self.dataset.num_users,
                                                                                       rewards[i], time_i))
        total_time = time.time() - start_time
        avg_reward = np.average(np.array(rewards))
        return avg_reward, total_time

    def run(self, num_epochs, verbosity=1):
        """
        Runs run_epoch() num_epoch times.
        :param num_epochs: Number of epochs = iterating over all users.
        :return: List of average rewards per epoch.
        """
        # Reset the pool of users that still have unrated items.
        self.users_with_unrated_items = np.array(range(self.dataset.num_users))
        avg_rewards = np.zeros(shape=(num_epochs,), dtype=float)
        for i in range(num_epochs):
            avg_rewards[i], total_time = self.run_epoch(verbosity)
            if verbosity >= 1:
                print(
                    "Finished epoch {}/{} with avg reward {} in {}s".format(i, num_epochs, avg_rewards[i], total_time))
        return avg_rewards
| [
"numpy.sum",
"numpy.zeros",
"numpy.identity",
"movielens.MovieLens",
"time.time",
"numpy.argsort",
"numpy.isnan",
"numpy.max",
"numpy.where",
"numpy.array",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.random.choice",
"numpy.argwhere",
"numpy.dot",
"numpy.intersect1d",
"numpy.linalg.no... | [((934, 981), 'numpy.random.choice', 'np.random.choice', (['self.users_with_unrated_items'], {}), '(self.users_with_unrated_items)\n', (950, 981), True, 'import numpy as np\n'), ((1117, 1165), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.dataset.num_items, self.d)'}), '(shape=(self.dataset.num_items, self.d))\n', (1125, 1165), True, 'import numpy as np\n'), ((2148, 2166), 'numpy.zeros', 'np.zeros', (['user_num'], {}), '(user_num)\n', (2156, 2166), True, 'import numpy as np\n'), ((2615, 2632), 'numpy.where', 'np.where', (['(sim > 0)'], {}), '(sim > 0)\n', (2623, 2632), True, 'import numpy as np\n'), ((2719, 2761), 'numpy.intersect1d', 'np.intersect1d', (['user_pos_index', 'user_top_k'], {}), '(user_pos_index, user_top_k)\n', (2733, 2761), True, 'import numpy as np\n'), ((5803, 5814), 'numpy.max', 'np.max', (['p_t'], {}), '(p_t)\n', (5809, 5814), True, 'import numpy as np\n'), ((6168, 6194), 'numpy.random.choice', 'np.random.choice', (['max_idxs'], {}), '(max_idxs)\n', (6184, 6194), True, 'import numpy as np\n'), ((7166, 7177), 'time.time', 'time.time', ([], {}), '()\n', (7175, 7177), False, 'import time\n'), ((8887, 8929), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_epochs,)', 'dtype': 'float'}), '(shape=(num_epochs,), dtype=float)\n', (8895, 8929), True, 'import numpy as np\n'), ((384, 482), 'movielens.MovieLens', 'MovieLens', ([], {'variant': '"""ml-100k"""', 'pos_rating_threshold': '(4)', 'data_augmentation_mode': '"""binary_unknown"""'}), "(variant='ml-100k', pos_rating_threshold=4, data_augmentation_mode\n ='binary_unknown')\n", (393, 482), False, 'from movielens import MovieLens\n'), ((2655, 2670), 'numpy.argsort', 'np.argsort', (['sim'], {}), '(sim)\n', (2665, 2670), True, 'import numpy as np\n'), ((3393, 3415), 'numpy.sum', 'np.sum', (['top_k_user_sim'], {}), '(top_k_user_sim)\n', (3399, 3415), True, 'import numpy as np\n'), ((3861, 3914), 'numpy.zeros', 'np.zeros', ([], {'shape': '(arm_features.shape[0],)', 'dtype': 'float'}), 
'(shape=(arm_features.shape[0],), dtype=float)\n', (3869, 3914), True, 'import numpy as np\n'), ((7257, 7268), 'time.time', 'time.time', ([], {}), '()\n', (7266, 7268), False, 'import time\n'), ((8425, 8436), 'time.time', 'time.time', ([], {}), '()\n', (8434, 8436), False, 'import time\n'), ((8483, 8500), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (8491, 8500), True, 'import numpy as np\n'), ((1563, 1595), 'numpy.identity', 'np.identity', (['self.d'], {'dtype': 'float'}), '(self.d, dtype=float)\n', (1574, 1595), True, 'import numpy as np\n'), ((5680, 5713), 'numpy.matmul', 'np.matmul', (['P_t', 'top_k_user_weight'], {}), '(P_t, top_k_user_weight)\n', (5689, 5713), True, 'import numpy as np\n'), ((6115, 6142), 'numpy.argwhere', 'np.argwhere', (['(p_t == max_p_t)'], {}), '(p_t == max_p_t)\n', (6126, 6142), True, 'import numpy as np\n'), ((8118, 8129), 'time.time', 'time.time', ([], {}), '()\n', (8127, 8129), False, 'import time\n'), ((2288, 2312), 'numpy.dot', 'np.dot', (['R[t]', 'R[user_id]'], {}), '(R[t], R[user_id])\n', (2294, 2312), True, 'import numpy as np\n'), ((2387, 2406), 'numpy.isnan', 'np.isnan', (['simlarity'], {}), '(simlarity)\n', (2395, 2406), True, 'import numpy as np\n'), ((5055, 5074), 'numpy.linalg.inv', 'np.linalg.inv', (['A[a]'], {}), '(A[a])\n', (5068, 5074), True, 'import numpy as np\n'), ((2316, 2336), 'numpy.linalg.norm', 'np.linalg.norm', (['R[t]'], {}), '(R[t])\n', (2330, 2336), True, 'import numpy as np\n'), ((2339, 2365), 'numpy.linalg.norm', 'np.linalg.norm', (['R[user_id]'], {}), '(R[user_id])\n', (2353, 2365), True, 'import numpy as np\n')] |
"""
The contents of this module are currently experimental and under active
development. More thorough documentation will be done when its development has
settled.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from itertools import product
import inspect
from numbers import Number
from time import time
import numpy as np
from py_search.base import Node
from py_search.base import Problem
from py_search.informed import best_first_search
from py_search.utils import compare_searches
def levenshtein(source, target):
    """Return the Levenshtein edit distance between two sequences.

    Implementation follows the classic two-row dynamic-programming scheme
    (only the previous row of the DP matrix is kept), vectorized with numpy.
    See:
    http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
    """
    # Ensure the longer sequence is `source` so the DP row stays short.
    if len(source) < len(target):
        return levenshtein(target, source)
    # Distance from the empty sequence is just the other sequence's length.
    if len(target) == 0:
        return len(source)
    # tuple() forces strings to behave as element sequences for numpy,
    # rather than being treated as scalar values.
    src = np.array(tuple(source))
    tgt = np.array(tuple(target))
    # Row i holds edit distances between source[:i] and every prefix of target.
    previous_row = np.arange(tgt.size + 1)
    for symbol in src:
        # Insertion: target grows longer than source.
        current_row = previous_row + 1
        # Substitution (cost 1) or match (cost 0) for aligned elements.
        substitution = np.add(previous_row[:-1], tgt != symbol)
        current_row[1:] = np.minimum(current_row[1:], substitution)
        # Deletion: target grows shorter than source.
        current_row[1:] = np.minimum(current_row[1:], current_row[0:-1] + 1)
        previous_row = current_row
    return previous_row[-1]
class ActionPlannerProblem(Problem):
    """Search problem for explaining how a value is derived from a state.

    A problem state is a triple ``(state, chosen, goal)``: ``state`` is a
    tuple of ``(attribute, value)`` pairs, ``chosen`` is the element picked
    as the explanation (``None`` until one is picked), and ``goal`` is the
    value being explained.  ``node.extra`` supplies the action functions
    (``"actions"``), the numeric tolerance (``"epsilon"``), and the shared
    set of already ``"tested"`` elements.
    """

    def successors(self, node):
        """Yield successor nodes.

        A successor either selects an untested state element as the
        candidate explanation, or applies one of the available actions to a
        combination of state elements and appends the computed value to the
        state.
        """
        state, chosen, goal = node.state
        actions = node.extra["actions"]
        tested = node.extra['tested']
        if chosen is None:
            # Propose each not-yet-tested state element as the explanation.
            for ele in state:
                if ele not in tested:
                    attr, val = ele
                    tested.add(ele)
                    yield Node((tuple(state), ele, goal), node, attr,
                               node.cost(), node.extra)
        for action in actions:
            # getfullargspec replaces inspect.getargspec, which was
            # deprecated since Python 3.0 and removed in Python 3.11.
            num_args = len(inspect.getfullargspec(actions[action]).args)
            # TODO pass in some function that filters out the arguments that
            # are worth considering otherwise you waste your time searching
            # too much.
            possible_args = [ele for ele in state if len(ele[0]) > 0]
            for tupled_args in product(possible_args, repeat=num_args):
                names = [a for a, v in tupled_args]
                values = [v for a, v in tupled_args]
                new_state = list(state)
                action_name = tuple([action] + names)
                try:
                    new_state.append((action_name, actions[action](*values)))
                    path_cost = node.cost() + 1
                    yield Node((tuple(new_state), None, goal), node,
                               action_name, path_cost, node.extra)
                except Exception:
                    # Actions may reject incompatible arguments; skip them.
                    pass

    def goal_test(self, node):
        """Return True when the chosen element matches the goal value.

        Two non-bool numbers match when within ``epsilon`` of each other;
        all other values must compare equal.
        """
        s, chosen, goal = node.state
        epsilon = node.extra["epsilon"]
        if chosen is None:
            return False
        attr, v = chosen
        if ((not isinstance(goal, bool) and isinstance(goal, Number)) and
                (not isinstance(v, bool) and isinstance(v, Number))):
            if abs(goal - v) <= epsilon:
                return True
        elif v == goal:
            return True
        return False

    def heuristic(self, node):
        """Estimate distance-to-goal in [0, 1].

        Uses the closest untested state element: capped squared error for
        numeric goals, capped Levenshtein distance for string goals.
        """
        state, chosen, goal = node.state
        tested = node.extra['tested']
        if chosen is not None:
            return 0
        h = 1
        is_number_goal = (isinstance(goal, Number) and
                          not isinstance(goal, bool))
        if is_number_goal:
            for a, v in state:
                if (a, v) in tested:
                    continue
                try:
                    v = float(v)
                    vmax = 2000
                    diff = min((goal - v) * (goal - v), vmax)
                    dist = diff / 2000
                    if dist < h:
                        h = dist
                except (ValueError, TypeError):
                    # Values that cannot be coerced to float don't contribute.
                    pass
        else:
            for a, v in state:
                if (a, v) in tested:
                    continue
                if isinstance(v, str):
                    vmin = -1000
                    vmax = 1000
                    diff = max(min(levenshtein(v, goal), vmax), vmin)
                    dist = (diff + 1000) / 2000
                    if dist < h:
                        h = dist
        return h

    def node_value(self, node):
        """Rank nodes by path cost plus heuristic (A*-style ordering)."""
        return node.cost() + self.heuristic(node)
class NoHeuristic(ActionPlannerProblem):
    """Variant of ActionPlannerProblem that ignores the heuristic and orders
    nodes by accumulated path cost alone (uniform-cost ordering)."""

    def node_value(self, node):
        # Rank purely by cost so far; no distance-to-goal estimate.
        return node.cost()
class ActionPlanner:
    """Explains observed values/SAIs by searching for action sequences.

    Wraps best-first search over ``ActionPlannerProblem`` to find plans
    (nested action tuples) that recompute a target value from a state.
    ``action_set`` must provide a ``get_function_dict()`` mapping action
    names to callables.
    """
    def __init__(self, action_set, act_params=None):
        """Store the action set and validate/merge planner parameters.

        Recognized ``act_params`` keys (with defaults):
        ``epsilon`` (float >= 0, 0.0), ``depth_limit`` (int, 2),
        ``num_expl`` (int >= 1, 1), ``time_limit`` (float > 0, inf).
        Raises ValueError for values outside these constraints.
        """
        self.action_set = action_set
        self.act_params= {'epsilon':0.0,
                          'depth_limit':2,
                          'num_expl':1,
                          'time_limit':float('inf')}
        if act_params is not None:
            if 'epsilon' in act_params:
                if not isinstance(act_params['epsilon'],float) or act_params['epsilon'] < 0.0:
                    raise ValueError("epsilon must be a float >= 0")
                self.act_params['epsilon'] = act_params['epsilon']
            if 'depth_limit' in act_params:
                if not isinstance(act_params['depth_limit'],int):
                    raise ValueError("depth_limit must be an integer")
                self.act_params['depth_limit'] = act_params['depth_limit']
            if 'num_expl' in act_params:
                if not isinstance(act_params['num_expl'],int) or act_params['num_expl'] < 1:
                    raise ValueError('num_expl must be an integer >= 1')
                self.act_params['num_expl'] = act_params['num_expl']
            if 'time_limit' in act_params:
                if not isinstance(act_params['time_limit'],float) or act_params['time_limit'] <= 0.0:
                    raise ValueError('time_limit must be a float > 0.0')
                self.act_params['time_limit'] = act_params['time_limit']
    def explain_sai_iter(self, state, sai):
        """
        Returns an iterator to explanations for a given SAI in the provided
        state
        """
        already_found = set()
        inp_exps = [[] for ele in sai[2:]]
        # Flatten the trailing dict of inputs onto the end of the sai tuple.
        sai_copy = [ele for ele in sai[:-1]]
        for a in sai[-1]:
            sai_copy.append(sai[-1][a])
        sai = tuple(sai_copy)
        print(sai[2:])  # debug output
        # One explanation iterator per input value to be explained.
        inp_iters = [self.explain_value_iter(state, ele) for ele in sai[2:]]
        found = True
        while found:
            found=False
            # Round-robin: pull one new explanation from each input iterator.
            for i,it in enumerate(inp_iters):
                try:
                    value_exp = next(it)
                    inp_exps[i].append(value_exp)
                    found=True
                except StopIteration:
                    pass
            if found:
                # Yield every new cross-product combination exactly once.
                for exp in [sai[0:2] + inp for inp in product(*inp_exps)]:
                    if exp in already_found:
                        continue
                    already_found.add(exp)
                    yield exp
    def explain_sai(self, state, sai):
        """
        This function generates a number of explainations for a given observed SAI.
        """
        num_expl = self.act_params['num_expl']
        time_limit = self.act_params['time_limit']
        # Explain the selection (sai[2]) and each input value (sai[3] dict).
        sel = self.explain_value(state, sai[2], num_expl, time_limit)[0]
        exps = [self.explain_value(state, ele, num_expl, time_limit) for ele in
                [sai[3][a] for a in sai[3]]]
        print([sai[0:2] + (sel,) + inp for inp in product(*exps)])  # debug output
        return [sai[0:2] + (sel,) + inp for inp in product(*exps)]
    def explain_value_iter(self, state, value):
        """
        Returns an iterator for explainations of the provided value in the
        provided state.
        """
        extra = {}
        extra["actions"] = self.action_set.get_function_dict()
        extra["epsilon"] = self.act_params['epsilon']
        extra['tested'] = set()
        depth_limit = self.act_params['depth_limit']
        # Hidden state elements (keys starting with '_') are excluded.
        state = {k: state[k] for k in state if k[0] != '_'}
        problem = ActionPlannerProblem((tuple(state.items()),
                                       None, value), extra=extra)
        try:
            for solution in best_first_search(problem, cost_limit=depth_limit):
                state, chosen, goal = solution.state
                yield chosen[0]
        except StopIteration:
            pass
        # Fall back to the literal value as its own explanation.
        yield str(value)
    def explain_value(self, state, value, num_expl=1, time_limit=float('inf')):
        """
        This function uses a planner compute the given value from the current
        state. The function returns a plan.
        """
        extra = {}
        extra["actions"] = self.action_set.get_function_dict()
        extra["epsilon"] = self.act_params['epsilon']
        extra['tested'] = set()
        depth_limit = self.act_params['depth_limit']
        # Hidden state elements (keys starting with '_') are excluded.
        state = {k: state[k] for k in state if k[0] != '_'}
        problem = ActionPlannerProblem((tuple(state.items()), None, value),
                                       extra=extra)
        explanations = []
        #print ("EXPLAINING ", value)
        s_time = time()
        try:
            # Collect up to num_expl explanations, or stop at the time limit.
            for solution in best_first_search(problem, cost_limit=depth_limit):
                #print(solution)
                if len(solution.path()) > 0:
                    state, chosen, goal = solution.state
                    #print(chosen, solution.cost())
                    explanations.append(chosen[0])
                if len(explanations) == num_expl:
                    break
                if time() - s_time > time_limit:
                    break
        except StopIteration:
            #print("EXPLAIN FAILED")
            pass
        if len(explanations) == 0:
            # No plan found: fall back to the literal value.
            return [str(value)]
        else:
            return explanations
    def execute_plan(self, plan, state):
        """Recursively evaluate a plan against a state and return its value.

        A plan is either a state key (looked up directly; empty-string
        values raise), a literal (returned as-is), or a tuple
        ``(action, arg_plan, ...)`` whose arguments are executed
        recursively before the action function is applied.
        """
        actions = self.action_set.get_function_dict()
        if plan in state:
            if state[plan] == "":
                raise Exception("Cannot use empty state elements as values")
            return state[plan]
        if not isinstance(plan, tuple):
            return plan
        #print("PLAN!!! ", plan)
        args = tuple(self.execute_plan(ele, state) for ele in plan[1:])
        action = plan[0]
        return actions[action](*args)
    def is_sais_equal(self, sai1, sai2):
        """
        Given two sais, this tells you if they are equal, taking into account
        that two floats might be within epsilon of one another.
        >>> ap = ActionPlanner({})
        >>> ap.is_sais_equal(('sai', 'update', 3), ('sai', 'update', 3))
        True
        >>> ap.is_sais_equal(('sai', 'update', 1), ('sai', 'update', 3))
        False
        """
        if len(sai1) != len(sai2):
            return False
        for i in range(len(sai1)):
            # Non-bool numbers compare within epsilon; everything else exactly.
            if ((not isinstance(sai1[i], bool) and isinstance(sai1[i], Number)) and
                    (not isinstance(sai2[i], bool) and isinstance(sai2[i], Number))):
                if abs(sai1[i] - sai2[i]) > self.act_params['epsilon']:
                    return False
            elif sai1[i] != sai2[i]:
                return False
        return True
    def compare_plan(self, plan, sai, state):
        """
        Given an general plan, a specific sai, and a state return True if the
        plan would generate the sai in the context of the state.
        """
        if len(plan) != len(sai):
            return False
        # Evaluate every element of the plan in the context of the state.
        plan = tuple([self.execute_plan(ele, state) for ele in plan])
        print('COMPARING')
        print(sai)
        print(plan)
        print('DONE COMPARING')
        # Element 0 is skipped; compare the remaining elements like SAIs.
        for i in range(1,len(plan)):
            if ((not isinstance(plan[i], bool) and isinstance(plan[i], Number)) and
                    (not isinstance(sai[i], bool) and isinstance(sai[i], Number))):
                if abs(plan[i] - sai[i]) > self.act_params['epsilon']:
                    return False
            elif plan[i] != sai[i]:
                return False
        return True
#def car(x):
# if isinstance(x, str) and len(x) > 1:
# return x[0]
#
#def cdr(x):
# if isinstance(x, str) and len(x) > 2:
# return x[1:]
#
#def append(x, y):
# if isinstance(x, str) and isinstance(y, str):
# return x + y
#
#def tostring(x):
# return str(x)
def add(x,y):
    """Add two values.

    Both strings: parse as floats, add, and format with ``%i`` (truncated
    integer string).  Both non-bool numbers: plain numeric sum.  Any other
    combination raises TypeError.
    """
    if isinstance(x, str) and isinstance(y,str):
        return "%i" % (float(x) + float(y))
    x_is_num = isinstance(x,Number) and not isinstance(x, bool)
    y_is_num = isinstance(y,Number) and not isinstance(y, bool)
    if x_is_num and y_is_num:
        return x+y
    raise TypeError("Arguments must both be strings or both be Numbers")
def subtract(x,y):
    """Subtract y from x.

    Both strings: parse as floats, subtract, and format with ``%i``
    (truncated integer string).  Both non-bool numbers: plain numeric
    difference.  Any other combination raises TypeError.
    """
    if isinstance(x, str) and isinstance(y,str):
        return "%i" % (float(x) - float(y))
    x_is_num = isinstance(x,Number) and not isinstance(x, bool)
    y_is_num = isinstance(y,Number) and not isinstance(y, bool)
    if x_is_num and y_is_num:
        return x-y
    raise TypeError("Arguments must both be strings or both be Numbers")
def multiply(x,y):
    """Multiply two values.

    Both strings: parse as floats, multiply, and format with ``%i``
    (truncated integer string).  Both non-bool numbers: plain numeric
    product.  Any other combination raises TypeError.
    """
    if isinstance(x, str) and isinstance(y,str):
        return "%i" % (float(x) * float(y))
    x_is_num = isinstance(x,Number) and not isinstance(x, bool)
    y_is_num = isinstance(y,Number) and not isinstance(y, bool)
    if x_is_num and y_is_num:
        return x*y
    raise TypeError("Arguments must both be strings or both be Numbers")
def divide(x,y):
    """Divide x by y.

    Both strings: parse as floats, divide, and format with ``%i``
    (truncated integer string).  Both non-bool numbers: true division.
    Any other combination raises TypeError.  Division by zero propagates
    ZeroDivisionError.
    """
    if isinstance(x, str) and isinstance(y,str):
        return "%i" % (float(x) / float(y))
    x_is_num = isinstance(x,Number) and not isinstance(x, bool)
    y_is_num = isinstance(y,Number) and not isinstance(y, bool)
    if x_is_num and y_is_num:
        return x/y
    raise TypeError("Arguments must both be strings or both be Numbers")
# Registry of the arithmetic actions defined above, keyed by action name.
# An object exposing this mapping via get_function_dict() can serve as an
# ActionPlanner action_set.
math_actions = {
    "add":add,
    "subtract":subtract,
    "multiply":multiply,
    "divide":divide
}
#updating math function using pyfunction
#import sys, os
#sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'apprentice_learner'))
#print("HELLO ARE YO thewU")
#import models
#print("HELLOHOW ARE YOU")
#new_function = models.PyFunction.objects.get(id=1)
#math_actions_temp = {
# "subtract":"subtracting",
# "multiply":"multiplying",
# "divide":"dividing"
#}
#print("HEY! I AM back")
#if new_function.name not in list(math_actions_temp.keys()):
# math_actions_temp[new_function.name] = new_function.fun_def
# print("HURRAY!I AM HAPPY")
# print(math_actions_temp)
# print("I AM MORE HAPPY")
#sys.path = os.path.dirnamr(__file__)
#updating ends
if __name__ == "__main__":
    # Demo/benchmark: explain the value -2.05 from a one-element state and
    # compare heuristic-guided search against plain cost-ordered search.
    actions = {'add': add,
               'subtract': subtract,
               'multiply': multiply,
               'divide': divide }
    act_params={'epsilon':0.85}
    ap = ActionPlanner(actions,act_params)
    s = {('value', 'v1'): -1.03}
    explain = -2.05
    # Build the raw search problems directly (same shape as ActionPlanner
    # constructs internally).
    extra = {}
    extra['actions'] = actions
    extra['epsilon'] = act_params['epsilon']
    extra['tested'] = set()
    problem = ActionPlannerProblem((tuple(s.items()), None, explain), extra=extra)
    problem2 = NoHeuristic((tuple(s.items()), None, explain), extra=extra)
    #print(s)
    def cost_limited(problem):
        # Search wrapper with a fixed cost limit for the comparison harness.
        return best_first_search(problem, cost_limit=4)
    compare_searches([problem, problem2], [cost_limited])
| [
"numpy.minimum",
"time.time",
"numpy.arange",
"py_search.informed.best_first_search",
"inspect.getargspec",
"itertools.product",
"py_search.utils.compare_searches",
"numpy.add"
] | [((1348, 1374), 'numpy.arange', 'np.arange', (['(target.size + 1)'], {}), '(target.size + 1)\n', (1357, 1374), True, 'import numpy as np\n'), ((16352, 16405), 'py_search.utils.compare_searches', 'compare_searches', (['[problem, problem2]', '[cost_limited]'], {}), '([problem, problem2], [cost_limited])\n', (16368, 16405), False, 'from py_search.utils import compare_searches\n'), ((1862, 1912), 'numpy.minimum', 'np.minimum', (['current_row[1:]', '(current_row[0:-1] + 1)'], {}), '(current_row[1:], current_row[0:-1] + 1)\n', (1872, 1912), True, 'import numpy as np\n'), ((10194, 10200), 'time.time', 'time', ([], {}), '()\n', (10198, 10200), False, 'from time import time\n'), ((16306, 16346), 'py_search.informed.best_first_search', 'best_first_search', (['problem'], {'cost_limit': '(4)'}), '(problem, cost_limit=4)\n', (16323, 16346), False, 'from py_search.informed import best_first_search\n'), ((1739, 1777), 'numpy.add', 'np.add', (['previous_row[:-1]', '(target != s)'], {}), '(previous_row[:-1], target != s)\n', (1745, 1777), True, 'import numpy as np\n'), ((3155, 3194), 'itertools.product', 'product', (['possible_args'], {'repeat': 'num_args'}), '(possible_args, repeat=num_args)\n', (3162, 3194), False, 'from itertools import product\n'), ((9255, 9305), 'py_search.informed.best_first_search', 'best_first_search', (['problem'], {'cost_limit': 'depth_limit'}), '(problem, cost_limit=depth_limit)\n', (9272, 9305), False, 'from py_search.informed import best_first_search\n'), ((10242, 10292), 'py_search.informed.best_first_search', 'best_first_search', (['problem'], {'cost_limit': 'depth_limit'}), '(problem, cost_limit=depth_limit)\n', (10259, 10292), False, 'from py_search.informed import best_first_search\n'), ((8615, 8629), 'itertools.product', 'product', (['*exps'], {}), '(*exps)\n', (8622, 8629), False, 'from itertools import product\n'), ((2560, 2595), 'inspect.getargspec', 'inspect.getargspec', (['actions[action]'], {}), '(actions[action])\n', (2578, 2595), 
False, 'import inspect\n'), ((8547, 8561), 'itertools.product', 'product', (['*exps'], {}), '(*exps)\n', (8554, 8561), False, 'from itertools import product\n'), ((7881, 7899), 'itertools.product', 'product', (['*inp_exps'], {}), '(*inp_exps)\n', (7888, 7899), False, 'from itertools import product\n'), ((10627, 10633), 'time.time', 'time', ([], {}), '()\n', (10631, 10633), False, 'from time import time\n')] |
#!/usr/bin/env python3
"""Test script for algorithm_rgb code
"""
import os
import sys
import numpy as np
import gdal
import algorithm_rgb
def _get_variables_header_fields() -> str:
    """Returns a string representing the variable header fields
    Return:
        Returns a string representing the variables' header fields
        (one comma-terminated field per configured variable)
    """
    variables = algorithm_rgb.VARIABLE_NAMES.split(',')
    labels = algorithm_rgb.VARIABLE_LABELS.split(',')
    units = algorithm_rgb.VARIABLE_UNITS.split(',')
    num_labels = len(labels)
    num_units = len(units)
    # Mismatched label/unit counts are warned about but not fatal.
    if num_labels != len(variables):
        sys.stderr.write("The number of defined labels doesn't match the number of defined variables")
        sys.stderr.write("    continuing processing")
    if num_units != len(variables):
        sys.stderr.write("The number of defined units doesn't match the number of defined variables")
        sys.stderr.write("    continuing processing")
    fields = []
    for idx, variable_name in enumerate(variables):
        field = variable_name
        if idx < num_labels:
            field += ' - %s' % labels[idx]
        if idx < num_units:
            field += ' (%s)' % units[idx]
        fields.append(field + ',')
    return ''.join(fields)
def print_usage():
    """Displays information on how to use this script
    """
    # Prefer the invoked script name; fall back to this file's name when
    # sys.argv is empty.
    our_name = os.path.basename(sys.argv[0]) if len(sys.argv) else os.path.basename(__file__)
    print(our_name + " <folder>|<filename> ...")
    print("  folder:   path to folder containing images to process")
    print("  filename: path to an image file to process")
    print("")
    print("  One or more folders and/or filenames can be used")
    print("  Only files at the top level of a folder are processed")
def check_arguments():
    """Checks that we have script argument parameters that appear valid

    Returns True when at least one path is given and every given path
    exists; otherwise reports the problem and returns False.
    """
    if len(sys.argv) < 2:
        sys.stderr.write("One or more paths to images need to be specified on the command line\n")
        print_usage()
        return False
    # Verify that every supplied path exists.
    have_errors = False
    for path in sys.argv[1:]:
        if not os.path.exists(path):
            print("The following path doesn't exist: " + path)
            have_errors = True
    if have_errors:
        sys.stderr.write("Please correct any problems and try again\n")
    return not have_errors
def check_configuration():
    """Checks if the configuration is setup properly for testing

    Returns True when algorithm_rgb defines a non-empty VARIABLE_NAMES.
    """
    # getattr with a None default covers both "missing" and "empty" cases.
    if getattr(algorithm_rgb, 'VARIABLE_NAMES', None):
        return True
    sys.stderr.write("Variable names configuration variable is not defined yet. Please define and try again")
    sys.stderr.write("    Update configuration.py and set VALUE_NAMES variable with your variable names")
    return False
def run_test(filename):
    """Runs the extractor code using pixels from the file
    Args:
        filename(str): Path to image file
    Return:
        Prints a CSV row "<filename>,<calculated values>" on success.
    Notes:
        Assumes the path passed in is valid. An error is reported if
        the file is not an image file.
    """
    try:
        raster = gdal.Open(filename)
        if not raster:
            return
        # Pull the pixel data and run the calculation (bands moved to the
        # last axis).
        pixels = np.array(raster.ReadAsArray())
        calc_val = algorithm_rgb.calculate(np.rollaxis(pixels, 0, 3))
        # Sets have no stable ordering for CSV output; reject them.
        if isinstance(calc_val, set):
            raise RuntimeError("A 'set' type of data was returned and isn't supported. Please use a list or a tuple instead")
        # Convert the result to a printable string.
        if isinstance(calc_val, str):
            print_val = calc_val
        else:
            # Comma-separate iterable results; stringify everything else.
            try:
                _ = iter(calc_val)
                print_val = ",".join(map(str, calc_val))
            except Exception:
                print_val = str(calc_val)
        print(filename + "," + print_val)
    except Exception as ex:
        sys.stderr.write("Exception caught: " + str(ex) + "\n")
        sys.stderr.write("    File: " + filename + "\n")
def process_files():
    """Processes the command line file/folder arguments

    Prints the CSV header, then runs the test on each file argument and
    on each file found at the top level of each folder argument.
    """
    argc = len(sys.argv)
    if not argc:
        return
    print("Filename," + _get_variables_header_fields())
    for idx in range(1, argc):
        cur_path = sys.argv[idx]
        if not os.path.isdir(cur_path):
            run_test(cur_path)
            continue
        # Folders: process only their top-level files.
        for entry in os.listdir(cur_path):
            full_path = os.path.join(cur_path, entry)
            if os.path.isfile(full_path):
                run_test(full_path)
if __name__ == "__main__":
    # Run only when both the CLI arguments and the algorithm configuration
    # pass validation.
    if check_arguments() and check_configuration():
        process_files()
| [
"numpy.rollaxis",
"os.path.basename",
"os.path.isdir",
"algorithm_rgb.VARIABLE_UNITS.split",
"os.path.exists",
"gdal.Open",
"algorithm_rgb.VARIABLE_NAMES.split",
"algorithm_rgb.VARIABLE_LABELS.split",
"sys.stderr.write",
"os.path.join",
"os.listdir"
] | [((353, 392), 'algorithm_rgb.VARIABLE_NAMES.split', 'algorithm_rgb.VARIABLE_NAMES.split', (['""","""'], {}), "(',')\n", (387, 392), False, 'import algorithm_rgb\n'), ((406, 446), 'algorithm_rgb.VARIABLE_LABELS.split', 'algorithm_rgb.VARIABLE_LABELS.split', (['""","""'], {}), "(',')\n", (441, 446), False, 'import algorithm_rgb\n'), ((488, 527), 'algorithm_rgb.VARIABLE_UNITS.split', 'algorithm_rgb.VARIABLE_UNITS.split', (['""","""'], {}), "(',')\n", (522, 527), False, 'import algorithm_rgb\n'), ((601, 705), 'sys.stderr.write', 'sys.stderr.write', (['"""The number of defined labels doesn\'t match the number of defined variables"""'], {}), '(\n "The number of defined labels doesn\'t match the number of defined variables"\n )\n', (617, 705), False, 'import sys\n'), ((704, 747), 'sys.stderr.write', 'sys.stderr.write', (['""" continuing processing"""'], {}), "(' continuing processing')\n", (720, 747), False, 'import sys\n'), ((792, 895), 'sys.stderr.write', 'sys.stderr.write', (['"""The number of defined units doesn\'t match the number of defined variables"""'], {}), '(\n "The number of defined units doesn\'t match the number of defined variables"\n )\n', (808, 895), False, 'import sys\n'), ((894, 937), 'sys.stderr.write', 'sys.stderr.write', (['""" continuing processing"""'], {}), "(' continuing processing')\n", (910, 937), False, 'import sys\n'), ((1411, 1440), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1427, 1440), False, 'import os\n'), ((1470, 1496), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1486, 1496), False, 'import os\n'), ((1979, 2074), 'sys.stderr.write', 'sys.stderr.write', (['"""One or more paths to images need to be specified on the command line\n"""'], {}), "(\n 'One or more paths to images need to be specified on the command line\\n')\n", (1995, 2074), False, 'import sys\n'), ((2381, 2444), 'sys.stderr.write', 'sys.stderr.write', (['"""Please correct any problems and try 
again\n"""'], {}), "('Please correct any problems and try again\\n')\n", (2397, 2444), False, 'import sys\n'), ((2672, 2787), 'sys.stderr.write', 'sys.stderr.write', (['"""Variable names configuration variable is not defined yet. Please define and try again"""'], {}), "(\n 'Variable names configuration variable is not defined yet. Please define and try again'\n )\n", (2688, 2787), False, 'import sys\n'), ((2786, 2897), 'sys.stderr.write', 'sys.stderr.write', (['""" Update configuration.py and set VALUE_NAMES variable with your variable names"""'], {}), "(\n ' Update configuration.py and set VALUE_NAMES variable with your variable names'\n )\n", (2802, 2897), False, 'import sys\n'), ((3295, 3314), 'gdal.Open', 'gdal.Open', (['filename'], {}), '(filename)\n', (3304, 3314), False, 'import gdal\n'), ((2218, 2247), 'os.path.exists', 'os.path.exists', (['sys.argv[idx]'], {}), '(sys.argv[idx])\n', (2232, 2247), False, 'import os\n'), ((4330, 4378), 'sys.stderr.write', 'sys.stderr.write', (["(' File: ' + filename + '\\n')"], {}), "(' File: ' + filename + '\\n')\n", (4346, 4378), False, 'import sys\n'), ((3490, 3512), 'numpy.rollaxis', 'np.rollaxis', (['pix', '(0)', '(3)'], {}), '(pix, 0, 3)\n', (3501, 3512), True, 'import numpy as np\n'), ((4655, 4678), 'os.path.isdir', 'os.path.isdir', (['cur_path'], {}), '(cur_path)\n', (4668, 4678), False, 'import os\n'), ((4761, 4787), 'os.path.join', 'os.path.join', (['cur_path', 'fn'], {}), '(cur_path, fn)\n', (4773, 4787), False, 'import os\n'), ((4798, 4818), 'os.listdir', 'os.listdir', (['cur_path'], {}), '(cur_path)\n', (4808, 4818), False, 'import os\n'), ((4838, 4864), 'os.path.join', 'os.path.join', (['cur_path', 'fn'], {}), '(cur_path, fn)\n', (4850, 4864), False, 'import os\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.python.training import training_util
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
# Feature dimensionality of the Boston housing dataset.
_BOSTON_INPUT_DIM = 13
# Feature dimensionality of the Iris dataset.
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
  """Input fn returning the Boston dataset as (features, labels) tensors."""
  boston = base.load_boston()
  data = constant_op.constant(boston.data)
  reshaped = array_ops.reshape(data, [-1, _BOSTON_INPUT_DIM])
  features = input_lib.limit_epochs(reshaped, num_epochs=num_epochs)
  target = constant_op.constant(boston.target)
  labels = array_ops.reshape(target, [-1, 1])
  return features, labels
def iris_input_fn():
  """Input fn returning the Iris dataset as (features, labels) tensors."""
  iris = base.load_iris()
  data = constant_op.constant(iris.data)
  target = constant_op.constant(iris.target)
  features = array_ops.reshape(data, [-1, _IRIS_INPUT_DIM])
  labels = array_ops.reshape(target, [-1])
  return features, labels
def iris_input_fn_labels_dict():
  """Like iris_input_fn, but wraps the labels tensor in a {'labels': ...} dict."""
  iris = base.load_iris()
  data = constant_op.constant(iris.data)
  target = constant_op.constant(iris.target)
  features = array_ops.reshape(data, [-1, _IRIS_INPUT_DIM])
  labels = {'labels': array_ops.reshape(target, [-1])}
  return features, labels
def boston_eval_fn():
  """Eval input fn: the Boston dataset duplicated (concatenated with itself)."""
  boston = base.load_boston()
  n_examples = len(boston.target)
  features = array_ops.reshape(constant_op.constant(boston.data),
                              [n_examples, _BOSTON_INPUT_DIM])
  labels = array_ops.reshape(constant_op.constant(boston.target),
                            [n_examples, 1])
  doubled_features = array_ops.concat([features, features], 0)
  doubled_labels = array_ops.concat([labels, labels], 0)
  return doubled_features, doubled_labels
def extract(data, key):
  """Return data[key] when data is a dict (asserting key is present);
  otherwise return data unchanged."""
  if not isinstance(data, dict):
    return data
  assert key in data
  return data[key]
def linear_model_params_fn(features, labels, mode, params):
  """Linear regression model_fn whose learning rate comes from params."""
  allowed_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                   model_fn.ModeKeys.INFER)
  assert mode in allowed_modes
  xs = extract(features, 'input')
  ys = extract(labels, 'labels')
  prediction, loss = models.linear_regression_zero_init(xs, ys)
  step = training_util.get_global_step()
  train_op = optimizers.optimize_loss(
      loss, step, optimizer='Adagrad',
      learning_rate=params['learning_rate'])
  return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
  """Linear regression model_fn with a fixed 0.1 Adagrad learning rate."""
  allowed_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                   model_fn.ModeKeys.INFER)
  assert mode in allowed_modes
  xs = extract(features, 'input')
  ys = extract(labels, 'labels')
  if isinstance(xs, dict):
    # Unwrap a single-entry feature dict into the bare tensor.
    (_, xs), = xs.items()
  prediction, loss = models.linear_regression_zero_init(xs, ys)
  step = training_util.get_global_step()
  train_op = optimizers.optimize_loss(
      loss, step, optimizer='Adagrad', learning_rate=0.1)
  return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  allowed_modes = (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                   model_fn.ModeKeys.INFER)
  assert mode in allowed_modes
  prediction, loss = models.linear_regression_zero_init(features, labels)
  step = training_util.get_global_step()
  train_op = optimizers.optimize_loss(
      loss, step, optimizer='Adagrad', learning_rate=0.1)
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
  """Logistic regression model_fn (no mode arg); labels are one-hot over 3 classes."""
  xs = extract(features, 'input')
  ys = array_ops.one_hot(extract(labels, 'labels'), 3, 1, 0)
  prediction, loss = models.logistic_regression_zero_init(xs, ys)
  step = training_util.get_global_step()
  train_op = optimizers.optimize_loss(
      loss, step, optimizer='Adagrad', learning_rate=0.1)
  predictions = {
      'class': math_ops.argmax(prediction, 1),
      'prob': prediction
  }
  return predictions, loss, train_op
# Fixture file contents written by the export tests below.
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
  """Builds a fitted LinearRegressor plus a serving_input_fn for export tests.

  Returns a (estimator, serving_input_fn) pair; the serving input fn writes a
  vocab file into `tmpdir` and wires in a lookup op so asset export can be
  exercised.
  """
  def _input_fn():
    # Iris features/labels as constant tensors.
    iris = base.load_iris()
    return {
        'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
    }, constant_op.constant(
        iris.target, shape=[150], dtype=dtypes.int32)
  feature_columns = [
      feature_column_lib.real_valued_column('feature', dimension=4)
  ]
  est = linear.LinearRegressor(feature_columns)
  est.fit(input_fn=_input_fn, steps=20)
  feature_spec = feature_column_lib.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
  # hack in an op that uses an asset, in order to test asset export.
  # this is not actually valid, of course.
  def serving_input_fn_with_asset():
    features, labels, inputs = serving_input_fn()
    # Write the vocab fixture file that becomes the exported asset.
    vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
    vocab_file = gfile.GFile(vocab_file_name, mode='w')
    vocab_file.write(VOCAB_FILE_CONTENT)
    vocab_file.close()
    hashtable = lookup.HashTable(
        lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
    features['bogus_lookup'] = hashtable.lookup(
        math_ops.to_int64(features['feature']))
    return input_fn_utils.InputFnOps(features, labels, inputs)
  return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
  """Builds a fitted Estimator whose model_fn uses resource (hash table) ops.

  Returns a (estimator, serving_input_fn) pair for resource-export tests.
  """
  def _input_fn():
    # Iris features/labels as constant tensors.
    iris = base.load_iris()
    return {
        'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
    }, constant_op.constant(
        iris.target, shape=[150], dtype=dtypes.int32)
  feature_columns = [
      feature_column_lib.real_valued_column('feature', dimension=4)
  ]
  def resource_constant_model_fn(unused_features, unused_labels, mode):
    """A model_fn that loads a constant from a resource and serves it."""
    assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                    model_fn.ModeKeys.INFER)
    const = constant_op.constant(-1, dtype=dtypes.int64)
    table = lookup.MutableHashTable(
        dtypes.string, dtypes.int64, const, name='LookupTableModel')
    update_global_step = training_util.get_global_step().assign_add(1)
    if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
      # Train/eval: insert into both the model table and a second table
      # that represents training state.
      key = constant_op.constant(['key'])
      value = constant_op.constant([42], dtype=dtypes.int64)
      train_op_1 = table.insert(key, value)
      training_state = lookup.MutableHashTable(
          dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
      training_op_2 = training_state.insert(key, value)
      return (const, const,
              control_flow_ops.group(train_op_1, training_op_2,
                                     update_global_step))
    if mode == model_fn.ModeKeys.INFER:
      # Inference: serve the looked-up value from the model table.
      key = constant_op.constant(['key'])
      prediction = table.lookup(key)
      return prediction, const, update_global_step
  est = estimator.Estimator(model_fn=resource_constant_model_fn)
  est.fit(input_fn=_input_fn, steps=1)
  feature_spec = feature_column_lib.create_feature_spec_for_parsing(
      feature_columns)
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
  return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
  """Monitor that verifies step_begin/step_end each fire `expect_calls` times."""

  def __init__(self, expect_calls):
    super(CheckCallsMonitor, self).__init__()
    # Counters remain None until `begin` initializes them at training start.
    self.begin_calls = None
    self.end_calls = None
    self.expect_calls = expect_calls

  def begin(self, max_steps):
    """Resets both call counters when training starts."""
    self.begin_calls = 0
    self.end_calls = 0

  def step_begin(self, step):
    """Counts a step start; requests no extra tensors to run."""
    self.begin_calls = self.begin_calls + 1
    return {}

  def step_end(self, step, outputs):
    """Counts a step end; never requests early stopping."""
    self.end_calls = self.end_calls + 1
    return False

  def end(self):
    """Checks that each phase ran exactly `expect_calls` times."""
    assert self.end_calls == self.expect_calls
    assert self.begin_calls == self.expect_calls
def _model_fn_ops(expected_features, expected_labels, actual_features,
                  actual_labels, mode):
  """Returns trivial ModelFnOps whose ops are gated on equality assertions.

  Builds one assert_equal per expected feature plus one for the labels, and
  makes the dummy predictions/loss/train_op depend on all of them, so the
  session run fails if the estimator passed through wrong features or labels.
  """
  checks = []
  for k in expected_features:
    checks.append(
        check_ops.assert_equal(
            expected_features[k], actual_features[k], name='assert_%s' % k))
  checks.append(
      check_ops.assert_equal(
          expected_labels, actual_labels, name='assert_labels'))
  assert_ops = tuple(checks)
  # Everything below only executes once every equality assertion has passed.
  with ops.control_dependencies(assert_ops):
    return model_fn.ModelFnOps(
        mode=mode,
        predictions=constant_op.constant(0.),
        loss=constant_op.constant(0.),
        train_op=training_util.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
  """Wraps in-memory features/labels in an input_fn that emits constants."""
  def _input_fn():
    feature_tensors = {}
    for key, value in six.iteritems(features):
      feature_tensors[key] = constant_op.constant(value)
    return feature_tensors, constant_op.constant(labels)
  return _input_fn
class EstimatorModelFnTest(test.TestCase):
  """Tests for how Estimator calls and validates user-supplied model_fns."""
  def testModelFnArgs(self):
    """model_fn receives features, labels, mode, params and config."""
    features = {'x': 42., 'y': 43.}
    labels = 44.
    expected_params = {'some_param': 'some_value'}
    expected_config = run_config.RunConfig()
    expected_config.i_am_test = True
    # TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
    # doesn't work with mock fns.
    model_fn_call_count = [0]
    # `features` and `labels` are passed by position, `arg0` and `arg1` here.
    def _model_fn(arg0, arg1, mode, params, config):
      model_fn_call_count[0] += 1
      self.assertItemsEqual(features.keys(), arg0.keys())
      self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_params, params)
      self.assertTrue(config.i_am_test)
      return _model_fn_ops(features, labels, arg0, arg1, mode)
    est = estimator.Estimator(
        model_fn=_model_fn, params=expected_params, config=expected_config)
    self.assertEqual(0, model_fn_call_count[0])
    est.fit(input_fn=_make_input_fn(features, labels), steps=1)
    self.assertEqual(1, model_fn_call_count[0])
  def testPartialModelFnArgs(self):
    """A functools.partial model_fn receives its bound args plus the rest."""
    features = {'x': 42., 'y': 43.}
    labels = 44.
    expected_params = {'some_param': 'some_value'}
    expected_config = run_config.RunConfig()
    expected_config.i_am_test = True
    expected_foo = 45.
    expected_bar = 46.
    # TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
    # doesn't work with mock fns.
    model_fn_call_count = [0]
    # `features` and `labels` are passed by position, `arg0` and `arg1` here.
    def _model_fn(arg0, arg1, foo, mode, params, config, bar):
      model_fn_call_count[0] += 1
      self.assertEqual(expected_foo, foo)
      self.assertEqual(expected_bar, bar)
      self.assertItemsEqual(features.keys(), arg0.keys())
      self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_params, params)
      self.assertTrue(config.i_am_test)
      return _model_fn_ops(features, labels, arg0, arg1, mode)
    partial_model_fn = functools.partial(
        _model_fn, foo=expected_foo, bar=expected_bar)
    est = estimator.Estimator(
        model_fn=partial_model_fn,
        params=expected_params,
        config=expected_config)
    self.assertEqual(0, model_fn_call_count[0])
    est.fit(input_fn=_make_input_fn(features, labels), steps=1)
    self.assertEqual(1, model_fn_call_count[0])
  def testModelFnWithModelDir(self):
    """A model_fn with a model_dir argument receives the estimator's dir."""
    expected_param = {'some_param': 'some_value'}
    expected_model_dir = tempfile.mkdtemp()
    def _argument_checker(features,
                          labels,
                          mode,
                          params,
                          config=None,
                          model_dir=None):
      _, _, _ = features, labels, config
      self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
      self.assertEqual(expected_param, params)
      self.assertEqual(model_dir, expected_model_dir)
      return (constant_op.constant(0.), constant_op.constant(0.),
              training_util.get_global_step().assign_add(1))
    est = estimator.Estimator(
        model_fn=_argument_checker,
        params=expected_param,
        model_dir=expected_model_dir)
    est.fit(input_fn=boston_input_fn, steps=1)
  def testInvalidModelFn_no_train_op(self):
    """fit() raises ValueError when the model_fn returns no train_op."""
    def _invalid_model_fn(features, labels):
      # pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, 'weight')
      update_global_step = training_util.get_global_step().assign_add(1)
      with ops.control_dependencies([update_global_step]):
        loss = 100.0 - w
      return None, loss, None
    est = estimator.Estimator(model_fn=_invalid_model_fn)
    with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
      est.fit(input_fn=boston_input_fn, steps=1)
  def testInvalidModelFn_no_loss(self):
    """evaluate() raises ValueError when the model_fn has no loss in EVAL."""
    def _invalid_model_fn(features, labels, mode):
      # pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, 'weight')
      loss = 100.0 - w
      update_global_step = training_util.get_global_step().assign_add(1)
      with ops.control_dependencies([update_global_step]):
        train_op = w.assign_add(loss / 100.0)
      predictions = loss
      # Deliberately drop the loss only in EVAL mode so fit() still works.
      if mode == model_fn.ModeKeys.EVAL:
        loss = None
      return predictions, loss, train_op
    est = estimator.Estimator(model_fn=_invalid_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    with self.assertRaisesRegexp(ValueError, 'Missing loss'):
      est.evaluate(input_fn=boston_eval_fn, steps=1)
  def testInvalidModelFn_no_prediction(self):
    """evaluate()/predict() raise ValueError when predictions are missing."""
    def _invalid_model_fn(features, labels):
      # pylint: disable=unused-argument
      w = variables_lib.Variable(42.0, 'weight')
      loss = 100.0 - w
      update_global_step = training_util.get_global_step().assign_add(1)
      with ops.control_dependencies([update_global_step]):
        train_op = w.assign_add(loss / 100.0)
      return None, loss, train_op
    est = estimator.Estimator(model_fn=_invalid_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
      est.evaluate(input_fn=boston_eval_fn, steps=1)
    with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
      est.predict(input_fn=boston_input_fn)
    with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
      est.predict(
          input_fn=functools.partial(boston_input_fn, num_epochs=1),
          as_iterable=True)
  def testModelFnScaffoldInTraining(self):
    """A Scaffold init_fn supplied via ModelFnOps runs during fit()."""
    self.is_init_fn_called = False
    def _init_fn(scaffold, session):
      _, _ = scaffold, session
      self.is_init_fn_called = True
    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      return model_fn.ModelFnOps(
          mode=mode,
          predictions=constant_op.constant(0.),
          loss=constant_op.constant(0.),
          train_op=training_util.get_global_step().assign_add(1),
          scaffold=monitored_session.Scaffold(init_fn=_init_fn))
    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.fit(input_fn=boston_input_fn, steps=1)
    self.assertTrue(self.is_init_fn_called)
  def testModelFnScaffoldSaverUsage(self):
    """A Scaffold Saver is used by fit/evaluate/predict/export_savedmodel."""
    def _model_fn_scaffold(features, labels, mode):
      _, _ = features, labels
      variables_lib.Variable(1., 'weight')
      real_saver = saver_lib.Saver()
      # Wrap a real Saver in a mock so calls can be asserted below.
      self.mock_saver = test.mock.Mock(
          wraps=real_saver, saver_def=real_saver.saver_def)
      return model_fn.ModelFnOps(
          mode=mode,
          predictions=constant_op.constant([[1.]]),
          loss=constant_op.constant(0.),
          train_op=training_util.get_global_step().assign_add(1),
          scaffold=monitored_session.Scaffold(saver=self.mock_saver))
    def input_fn():
      return {
          'x': constant_op.constant([[1.]]),
      }, constant_op.constant([[1.]])
    est = estimator.Estimator(model_fn=_model_fn_scaffold)
    est.fit(input_fn=input_fn, steps=1)
    self.assertTrue(self.mock_saver.save.called)
    est.evaluate(input_fn=input_fn, steps=1)
    self.assertTrue(self.mock_saver.restore.called)
    est.predict(input_fn=input_fn)
    self.assertTrue(self.mock_saver.restore.called)
    def serving_input_fn():
      serialized_tf_example = array_ops.placeholder(
          dtype=dtypes.string, shape=[None], name='input_example_tensor')
      features, labels = input_fn()
      return input_fn_utils.InputFnOps(features, labels, {
          'examples': serialized_tf_example
      })
    est.export_savedmodel(
        os.path.join(est.model_dir, 'export'), serving_input_fn)
    self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
  """End-to-end tests of Estimator fit/evaluate/predict/export behavior."""
  def testExperimentIntegration(self):
    """Estimator works inside an Experiment's train/eval loop."""
    exp = experiment.Experiment(
        estimator=estimator.Estimator(model_fn=linear_model_fn),
        train_input_fn=boston_input_fn,
        eval_input_fn=boston_input_fn)
    exp.test()
  def testCheckpointSaverHookSuppressesTheDefaultOne(self):
    """Passing a CheckpointSaverHook disables the default checkpoint saver."""
    saver_hook = test.mock.Mock(
        spec=basic_session_run_hooks.CheckpointSaverHook)
    saver_hook.before_run.return_value = None
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
    # test nothing is saved, due to suppressing default saver
    with self.assertRaises(learn.NotFittedError):
      est.evaluate(input_fn=boston_input_fn, steps=1)
  def testCustomConfig(self):
    """A RunConfig tf_random_seed propagates to the input_fn's graph."""
    test_random_seed = 5783452
    class TestInput(object):
      def __init__(self):
        self.random_seed = 0
      def config_test_input_fn(self):
        # Record the graph-level seed so the test can verify it was set.
        self.random_seed = ops.get_default_graph().seed
        return constant_op.constant([[1.]]), constant_op.constant([1.])
    config = run_config.RunConfig(tf_random_seed=test_random_seed)
    test_input = TestInput()
    est = estimator.Estimator(model_fn=linear_model_fn, config=config)
    est.fit(input_fn=test_input.config_test_input_fn, steps=1)
    # If input_fn ran, it will have given us the random seed set on the graph.
    self.assertEquals(test_random_seed, test_input.random_seed)
  def testRunConfigModelDir(self):
    """model_dir from RunConfig is used by the estimator."""
    config = run_config.RunConfig(model_dir='test_dir')
    est = estimator.Estimator(model_fn=linear_model_fn, config=config)
    self.assertEqual('test_dir', est.config.model_dir)
    self.assertEqual('test_dir', est.model_dir)
  def testModelDirAndRunConfigModelDir(self):
    """Matching model_dir in constructor and RunConfig is OK; mismatch raises."""
    config = run_config.RunConfig(model_dir='test_dir')
    est = estimator.Estimator(
        model_fn=linear_model_fn, config=config, model_dir='test_dir')
    self.assertEqual('test_dir', est.config.model_dir)
    with self.assertRaisesRegexp(
        ValueError, 'model_dir are set both in constructor and RunConfig, '
        'but with different'):
      estimator.Estimator(
          model_fn=linear_model_fn, config=config, model_dir='different_dir')
  def testModelDirIsCopiedToRunConfig(self):
    """Constructor model_dir is reflected into the estimator's config."""
    config = run_config.RunConfig()
    self.assertIsNone(config.model_dir)
    est = estimator.Estimator(
        model_fn=linear_model_fn, model_dir='test_dir', config=config)
    self.assertEqual('test_dir', est.config.model_dir)
    self.assertEqual('test_dir', est.model_dir)
  def testModelDirAsTempDir(self):
    """Without an explicit model_dir a temp directory is used."""
    with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
      est = estimator.Estimator(model_fn=linear_model_fn)
      self.assertEqual('temp_dir', est.config.model_dir)
      self.assertEqual('temp_dir', est.model_dir)
  def testCheckInputs(self):
    """SKCompat.fit rejects x/y with wrong dtype or shape."""
    est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have to different objects to compare
    right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
    right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
    est.fit(right_features(), right_labels(), steps=1)
    # TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
    wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
    wrong_size_features = np.ones(shape=[7, 10])
    wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
    wrong_size_labels = np.ones(shape=[7, 11])
    est.fit(x=right_features(), y=right_labels(), steps=1)
    with self.assertRaises(ValueError):
      est.fit(x=wrong_type_features, y=right_labels(), steps=1)
    with self.assertRaises(ValueError):
      est.fit(x=wrong_size_features, y=right_labels(), steps=1)
    with self.assertRaises(ValueError):
      est.fit(x=right_features(), y=wrong_type_labels, steps=1)
    with self.assertRaises(ValueError):
      est.fit(x=right_features(), y=wrong_size_labels, steps=1)
  def testBadInput(self):
    """fit() rejects invalid combinations of x/y/input_fn/batch_size."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    self.assertRaisesRegexp(
        ValueError,
        'Either x or input_fn must be provided.',
        est.fit,
        x=None,
        input_fn=None,
        steps=1)
    self.assertRaisesRegexp(
        ValueError,
        'Can not provide both input_fn and x or y',
        est.fit,
        x='X',
        input_fn=iris_input_fn,
        steps=1)
    self.assertRaisesRegexp(
        ValueError,
        'Can not provide both input_fn and x or y',
        est.fit,
        y='Y',
        input_fn=iris_input_fn,
        steps=1)
    self.assertRaisesRegexp(
        ValueError,
        'Can not provide both input_fn and batch_size',
        est.fit,
        input_fn=iris_input_fn,
        batch_size=100,
        steps=1)
    self.assertRaisesRegexp(
        ValueError,
        'Inputs cannot be tensors. Please provide input_fn.',
        est.fit,
        x=constant_op.constant(1.),
        steps=1)
  def testUntrained(self):
    """score/predict before any fit raises NotFittedError."""
    boston = base.load_boston()
    est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    with self.assertRaises(learn.NotFittedError):
      _ = est.score(x=boston.data, y=boston.target.astype(np.float64))
    with self.assertRaises(learn.NotFittedError):
      est.predict(x=boston.data)
  def testContinueTraining(self):
    """A second estimator on the same model_dir can evaluate and keep training."""
    boston = base.load_boston()
    output_dir = tempfile.mkdtemp()
    est = estimator.SKCompat(
        estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
    float64_labels = boston.target.astype(np.float64)
    est.fit(x=boston.data, y=float64_labels, steps=50)
    scores = est.score(
        x=boston.data,
        y=float64_labels,
        metrics={
            'MSE': metric_ops.streaming_mean_squared_error
        })
    del est
    # Create another estimator object with the same output dir.
    est2 = estimator.SKCompat(
        estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir))
    # Check we can evaluate and predict.
    scores2 = est2.score(
        x=boston.data,
        y=float64_labels,
        metrics={
            'MSE': metric_ops.streaming_mean_squared_error
        })
    self.assertAllClose(scores['MSE'], scores2['MSE'])
    predictions = np.array(list(est2.predict(x=boston.data)))
    other_score = _sklearn.mean_squared_error(predictions, float64_labels)
    self.assertAllClose(scores['MSE'], other_score)
    # Check we can keep training.
    est2.fit(x=boston.data, y=float64_labels, steps=100)
    scores3 = est2.score(
        x=boston.data,
        y=float64_labels,
        metrics={
            'MSE': metric_ops.streaming_mean_squared_error
        })
    self.assertLess(scores3['MSE'], scores['MSE'])
  def test_checkpoint_contains_relative_paths(self):
    """The checkpoint state file stores paths relative to the model_dir."""
    tmpdir = tempfile.mkdtemp()
    est = estimator.Estimator(
        model_dir=tmpdir, model_fn=linear_model_fn_with_model_fn_ops)
    est.fit(input_fn=boston_input_fn, steps=5)
    checkpoint_file_content = file_io.read_file_to_string(
        os.path.join(tmpdir, 'checkpoint'))
    ckpt = checkpoint_state_pb2.CheckpointState()
    text_format.Merge(checkpoint_file_content, ckpt)
    self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
    # TODO(b/78461127): Please modify tests to not directly rely on names of
    # checkpoints.
    self.assertAllEqual(['model.ckpt-0', 'model.ckpt-5'],
                        ckpt.all_model_checkpoint_paths)
  def test_train_save_copy_reload(self):
    """A model_dir can be renamed and training resumed from its checkpoint."""
    tmpdir = tempfile.mkdtemp()
    model_dir1 = os.path.join(tmpdir, 'model_dir1')
    est1 = estimator.Estimator(
        model_dir=model_dir1, model_fn=linear_model_fn_with_model_fn_ops)
    est1.fit(input_fn=boston_input_fn, steps=5)
    model_dir2 = os.path.join(tmpdir, 'model_dir2')
    os.renames(model_dir1, model_dir2)
    est2 = estimator.Estimator(
        model_dir=model_dir2, model_fn=linear_model_fn_with_model_fn_ops)
    self.assertEqual(5, est2.get_variable_value('global_step'))
    est2.fit(input_fn=boston_input_fn, steps=5)
    self.assertEqual(10, est2.get_variable_value('global_step'))
  def testEstimatorParams(self):
    """params passed to the constructor reach the model_fn."""
    boston = base.load_boston()
    est = estimator.SKCompat(
        estimator.Estimator(
            model_fn=linear_model_params_fn, params={
                'learning_rate': 0.01
            }))
    est.fit(x=boston.data, y=boston.target, steps=100)
  def testHooksNotChanged(self):
    """fit/evaluate must not mutate the caller's monitors/hooks list."""
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass empty array and expect it to remain empty after calling
    # fit and evaluate. Requires inside to copy this array if any hooks were
    # added.
    my_array = []
    est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
    _ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
    self.assertEqual(my_array, [])
  def testIrisIterator(self):
    """fit/score/predict accept plain Python iterators for x and y."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    x_iter = itertools.islice(iris.data, 100)
    y_iter = itertools.islice(iris.target, 100)
    estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
    eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
    x_iter_eval = itertools.islice(iris.data, 100)
    y_iter_eval = itertools.islice(iris.target, 100)
    score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
    print(score_result)
    self.assertItemsEqual(eval_result.keys(), score_result.keys())
    self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
    predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
    self.assertEqual(len(predictions), iris.target.shape[0])
  def testIrisIteratorArray(self):
    """Label iterators yielding numpy arrays are accepted."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    x_iter = itertools.islice(iris.data, 100)
    y_iter = (np.array(x) for x in iris.target)
    est.fit(x_iter, y_iter, steps=100)
    _ = est.evaluate(input_fn=iris_input_fn, steps=1)
    _ = six.next(est.predict(x=iris.data))['class']
  def testIrisIteratorPlainInt(self):
    """Label iterators yielding plain Python ints are accepted."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    x_iter = itertools.islice(iris.data, 100)
    y_iter = (v for v in iris.target)
    est.fit(x_iter, y_iter, steps=100)
    _ = est.evaluate(input_fn=iris_input_fn, steps=1)
    _ = six.next(est.predict(x=iris.data))['class']
  def testIrisTruncatedIterator(self):
    """Training tolerates an input iterator shorter than the step budget."""
    iris = base.load_iris()
    est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    x_iter = itertools.islice(iris.data, 50)
    y_iter = ([np.int32(v)] for v in iris.target)
    est.fit(x_iter, y_iter, steps=100)
  def testTrainStepsIsIncremental(self):
    """Successive fit(steps=...) calls add to the global step."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=10)
    self.assertEqual(10, est.get_variable_value('global_step'))
    est.fit(input_fn=boston_input_fn, steps=15)
    self.assertEqual(25, est.get_variable_value('global_step'))
  def testTrainMaxStepsIsNotIncremental(self):
    """fit(max_steps=...) is an absolute bound, not an increment."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, max_steps=10)
    self.assertEqual(10, est.get_variable_value('global_step'))
    est.fit(input_fn=boston_input_fn, max_steps=15)
    self.assertEqual(15, est.get_variable_value('global_step'))
  def testPredict(self):
    """predict() yields one prediction per input example."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    output = list(est.predict(x=boston.data, batch_size=10))
    self.assertEqual(len(output), boston.target.shape[0])
  def testWithModelFnOps(self):
    """Test for model_fn that returns `ModelFnOps`."""
    est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
    boston = base.load_boston()
    est.fit(input_fn=boston_input_fn, steps=1)
    input_fn = functools.partial(boston_input_fn, num_epochs=1)
    scores = est.evaluate(input_fn=input_fn, steps=1)
    self.assertIn('loss', scores.keys())
    output = list(est.predict(input_fn=input_fn))
    self.assertEqual(len(output), boston.target.shape[0])
  def testWrongInput(self):
    """fit() with features that don't match the trained graph raises."""
    def other_input_fn():
      return {
          'other': constant_op.constant([0, 0, 0])
      }, constant_op.constant([0, 0, 0])
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    with self.assertRaises(ValueError):
      est.fit(input_fn=other_input_fn, steps=1)
  def testMonitorsForFit(self):
    """Monitors passed to fit() are invoked once per training step."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(
        input_fn=boston_input_fn,
        steps=21,
        monitors=[CheckCallsMonitor(expect_calls=21)])
  def testHooksForEvaluate(self):
    """Hooks passed to evaluate() run once per evaluation step."""
    class CheckCallHook(session_run_hook.SessionRunHook):
      def __init__(self):
        self.run_count = 0
      def after_run(self, run_context, run_values):
        self.run_count += 1
    est = learn.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=1)
    hook = CheckCallHook()
    est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
    self.assertEqual(3, hook.run_count)
  def testSummaryWriting(self):
    """Training writes an OptimizeLoss/loss summary to the model_dir."""
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=200)
    est.evaluate(input_fn=boston_input_fn, steps=200)
    loss_summary = util_test.simple_values_from_events(
        util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
    self.assertEqual(1, len(loss_summary))
  def testSummaryWritingWithSummaryProto(self):
    """Metrics returning a Summary proto (histogram) are written to events."""
    def _streaming_mean_squared_error_histogram(predictions,
                                                labels,
                                                weights=None,
                                                metrics_collections=None,
                                                updates_collections=None,
                                                name=None):
      metrics, update_ops = metric_ops.streaming_mean_squared_error(
          predictions,
          labels,
          weights=weights,
          metrics_collections=metrics_collections,
          updates_collections=updates_collections,
          name=name)
      return summary.histogram('histogram', metrics), update_ops
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=200)
    est.evaluate(
        input_fn=boston_input_fn,
        steps=200,
        metrics={
            'MSE': _streaming_mean_squared_error_histogram
        })
    events = util_test.latest_events(est.model_dir + '/eval')
    output_values = {}
    for e in events:
      if e.HasField('summary'):
        for v in e.summary.value:
          output_values[v.tag] = v
    self.assertTrue('MSE' in output_values)
    self.assertTrue(output_values['MSE'].HasField('histo'))
  def testSummaryWritingWithTensor(self):
    """Metrics returning a tensor value are written as tensor summaries."""
    def _streaming_precition_mean_tensor(predictions,
                                         weights=None,
                                         metrics_collections=None,
                                         updates_collections=None,
                                         name=None):
      return metric_ops.streaming_mean_tensor(
          predictions,
          weights=weights,
          metrics_collections=metrics_collections,
          updates_collections=updates_collections,
          name=name)
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=200)
    est.evaluate(
        input_fn=boston_input_fn,
        steps=200,
        metrics={
            'PMT': _streaming_precition_mean_tensor
        })
    events = util_test.latest_events(est.model_dir + '/eval')
    output_values = {}
    for e in events:
      if e.HasField('summary'):
        for v in e.summary.value:
          output_values[v.tag] = v
    self.assertTrue('PMT' in output_values)
    self.assertTrue(output_values['PMT'].HasField('tensor'))
  def testLossInGraphCollection(self):
    """The training loss is registered in the GraphKeys.LOSSES collection."""
    class _LossCheckerHook(session_run_hook.SessionRunHook):
      def begin(self):
        self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
    hook = _LossCheckerHook()
    est = estimator.Estimator(model_fn=linear_model_fn)
    est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
    self.assertTrue(hook.loss_collection)
  def test_export_returns_exported_dirname(self):
    """export() returns the directory produced by the export module."""
    expected = '/path/to/some_dir'
    with test.mock.patch.object(estimator, 'export') as mock_export_module:
      mock_export_module._export_estimator.return_value = expected
      est = estimator.Estimator(model_fn=linear_model_fn)
      actual = est.export('/path/to')
    self.assertEquals(expected, actual)
  def test_export_savedmodel(self):
    """export_savedmodel writes graph, variables, assets and assets.extra."""
    tmpdir = tempfile.mkdtemp()
    est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
    extra_file_name = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
    extra_file = gfile.GFile(extra_file_name, mode='w')
    extra_file.write(EXTRA_FILE_CONTENT)
    extra_file.close()
    assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
    export_dir_base = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('export'))
    export_dir = est.export_savedmodel(
        export_dir_base, serving_input_fn, assets_extra=assets_extra)
    self.assertTrue(gfile.Exists(export_dir_base))
    self.assertTrue(gfile.Exists(export_dir))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('saved_model.pb'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir), compat.as_bytes('variables'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('variables/variables.index'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('variables/variables.data-00000-of-00001'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir), compat.as_bytes('assets'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('assets/my_vocab_file'))))
    self.assertEqual(
        compat.as_bytes(VOCAB_FILE_CONTENT),
        compat.as_bytes(
            gfile.GFile(
                os.path.join(
                    compat.as_bytes(export_dir),
                    compat.as_bytes('assets/my_vocab_file'))).read()))
    expected_extra_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
    self.assertTrue(gfile.Exists(expected_extra_path))
    self.assertEqual(
        compat.as_bytes(EXTRA_FILE_CONTENT),
        compat.as_bytes(gfile.GFile(expected_extra_path).read()))
    expected_vocab_file = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
    # Restore, to validate that the export was well-formed.
    with ops.Graph().as_default() as graph:
      with session_lib.Session(graph=graph) as sess:
        loader.load(sess, [tag_constants.SERVING], export_dir)
        assets = [
            x.eval()
            for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
        ]
        self.assertItemsEqual([expected_vocab_file], assets)
        graph_ops = [x.name for x in graph.get_operations()]
        self.assertTrue('input_example_tensor' in graph_ops)
        self.assertTrue('ParseExample/ParseExample' in graph_ops)
        self.assertTrue('linear/linear/feature/matmul' in graph_ops)
        self.assertItemsEqual(['bogus_lookup', 'feature'], [
            compat.as_str_any(x)
            for x in graph.get_collection(
                constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS)
        ])
    # cleanup
    gfile.DeleteRecursively(tmpdir)
  def test_export_savedmodel_with_resource(self):
    """Resource-backed tables survive export; training state does not."""
    tmpdir = tempfile.mkdtemp()
    est, serving_input_fn = _build_estimator_for_resource_export_test()
    export_dir_base = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('export'))
    export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
    self.assertTrue(gfile.Exists(export_dir_base))
    self.assertTrue(gfile.Exists(export_dir))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('saved_model.pb'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir), compat.as_bytes('variables'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('variables/variables.index'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('variables/variables.data-00000-of-00001'))))
    # Restore, to validate that the export was well-formed.
    with ops.Graph().as_default() as graph:
      with session_lib.Session(graph=graph) as sess:
        loader.load(sess, [tag_constants.SERVING], export_dir)
        graph_ops = [x.name for x in graph.get_operations()]
        self.assertTrue('input_example_tensor' in graph_ops)
        self.assertTrue('ParseExample/ParseExample' in graph_ops)
        self.assertTrue('LookupTableModel' in graph_ops)
        self.assertFalse('LookupTableTrainingState' in graph_ops)
    # cleanup
    gfile.DeleteRecursively(tmpdir)
  def test_export_savedmodel_with_graph_transforms(self):
    """Per-tag graph_rewrite_specs produce transformed and raw MetaGraphs."""
    tmpdir = tempfile.mkdtemp()
    est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
    extra_file_name = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
    extra_file = gfile.GFile(extra_file_name, mode='w')
    extra_file.write(EXTRA_FILE_CONTENT)
    extra_file.close()
    assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
    export_dir_base = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('export'))
    export_dir = est.export_savedmodel(
        export_dir_base,
        serving_input_fn,
        assets_extra=assets_extra,
        graph_rewrite_specs=[
            estimator.GraphRewriteSpec(['tag_1'], []),
            estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
                                       ['strip_unused_nodes'])
        ])
    self.assertTrue(gfile.Exists(export_dir_base))
    self.assertTrue(gfile.Exists(export_dir))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('saved_model.pb'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir), compat.as_bytes('variables'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('variables/variables.index'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('variables/variables.data-00000-of-00001'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir), compat.as_bytes('assets'))))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir),
                compat.as_bytes('assets/my_vocab_file'))))
    self.assertEqual(
        compat.as_bytes(VOCAB_FILE_CONTENT),
        compat.as_bytes(
            gfile.GFile(
                os.path.join(
                    compat.as_bytes(export_dir),
                    compat.as_bytes('assets/my_vocab_file'))).read()))
    expected_extra_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
    self.assertTrue(
        gfile.Exists(
            os.path.join(
                compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
    self.assertTrue(gfile.Exists(expected_extra_path))
    self.assertEqual(
        compat.as_bytes(EXTRA_FILE_CONTENT),
        compat.as_bytes(gfile.GFile(expected_extra_path).read()))
    expected_vocab_file = os.path.join(
        compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
    # Restore, to validate that the export was well-formed.
    # tag_1 is untransformed.
    tags = ['tag_1']
    with ops.Graph().as_default() as graph:
      with session_lib.Session(graph=graph) as sess:
        loader.load(sess, tags, export_dir)
        assets = [
            x.eval()
            for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
        ]
        self.assertItemsEqual([expected_vocab_file], assets)
        graph_ops = [x.name for x in graph.get_operations()]
        self.assertTrue('input_example_tensor' in graph_ops)
        self.assertTrue('ParseExample/ParseExample' in graph_ops)
        self.assertTrue('linear/linear/feature/matmul' in graph_ops)
        # Since there were no transforms, both save ops are still present.
        self.assertTrue('save/SaveV2/tensor_names' in graph_ops)
        self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
        # Since there were no transforms, the hash table lookup is still there.
        self.assertTrue('hash_table_Lookup' in graph_ops)
    # Restore, to validate that the export was well-formed.
    # tag_2, tag_3 was subjected to strip_unused_nodes.
    tags = ['tag_2', 'tag_3']
    with ops.Graph().as_default() as graph:
      with session_lib.Session(graph=graph) as sess:
        loader.load(sess, tags, export_dir)
        assets = [
            x.eval()
            for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
        ]
        self.assertItemsEqual([expected_vocab_file], assets)
        graph_ops = [x.name for x in graph.get_operations()]
        self.assertTrue('input_example_tensor' in graph_ops)
        self.assertTrue('ParseExample/ParseExample' in graph_ops)
        self.assertTrue('linear/linear/feature/matmul' in graph_ops)
        # The Saver used to restore the checkpoint into the export Session
        # was not added to the SAVERS collection, so strip_unused_nodes removes
        # it. The one explicitly created in export_savedmodel is tracked in
        # the MetaGraphDef saver_def field, so that one is retained.
        # TODO(soergel): Make Savers sane again. I understand this is all a bit
        # nuts but for now the test demonstrates what actually happens.
        self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
        self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
        # The fake hash table lookup wasn't connected to anything; stripped.
        self.assertFalse('hash_table_Lookup' in graph_ops)
    # cleanup
    gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
  """Tests for inferring real-valued feature columns from arrays/input_fns."""

  def testInvalidArgs(self):
    with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
      estimator.infer_real_valued_columns_from_input(None)
    with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
      estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))

  def _assert_single_feature_column(self, expected_shape, expected_dtype,
                                    feature_columns):
    # Expect exactly one unnamed column whose parsing config is a single
    # FixedLenFeature of the given shape and dtype.
    self.assertEqual(1, len(feature_columns))
    feature_column = feature_columns[0]
    self.assertEqual('', feature_column.name)
    self.assertEqual({
        '':
            parsing_ops.FixedLenFeature(
                shape=expected_shape, dtype=expected_dtype)
    }, feature_column.config)

  def _check_ndarray_input(self, np_dtype, tf_dtype):
    # Infer columns from a 7x8 ndarray; expect one [8] column of tf_dtype.
    columns = estimator.infer_real_valued_columns_from_input(
        np.ones(shape=[7, 8], dtype=np_dtype))
    self._assert_single_feature_column([8], tf_dtype, columns)

  def _check_tensor_input_fn(self, tf_dtype):
    # Same expectation, but the features come from an input_fn tensor.
    columns = estimator.infer_real_valued_columns_from_input_fn(
        lambda: (array_ops.ones(shape=[7, 8], dtype=tf_dtype), None))
    self._assert_single_feature_column([8], tf_dtype, columns)

  def testInt32Input(self):
    self._check_ndarray_input(np.int32, dtypes.int32)

  def testInt32InputFn(self):
    self._check_tensor_input_fn(dtypes.int32)

  def testInt64Input(self):
    self._check_ndarray_input(np.int64, dtypes.int64)

  def testInt64InputFn(self):
    self._check_tensor_input_fn(dtypes.int64)

  def testFloat32Input(self):
    self._check_ndarray_input(np.float32, dtypes.float32)

  def testFloat32InputFn(self):
    self._check_tensor_input_fn(dtypes.float32)

  def testFloat64Input(self):
    self._check_ndarray_input(np.float64, dtypes.float64)

  def testFloat64InputFn(self):
    self._check_tensor_input_fn(dtypes.float64)

  def testBoolInput(self):
    with self.assertRaisesRegexp(
        ValueError, 'on integer or non floating types are not supported'):
      estimator.infer_real_valued_columns_from_input(
          np.array([[False] * 8 for _ in xrange(7)]))

  def testBoolInputFn(self):
    with self.assertRaisesRegexp(
        ValueError, 'on integer or non floating types are not supported'):
      # pylint: disable=g-long-lambda
      estimator.infer_real_valued_columns_from_input_fn(
          lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
                   None))

  def testStringInput(self):
    with self.assertRaisesRegexp(
        ValueError, 'on integer or non floating types are not supported'):
      # pylint: disable=g-long-lambda
      estimator.infer_real_valued_columns_from_input(
          np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))

  def testStringInputFn(self):
    with self.assertRaisesRegexp(
        ValueError, 'on integer or non floating types are not supported'):
      # pylint: disable=g-long-lambda
      estimator.infer_real_valued_columns_from_input_fn(
          lambda: (constant_op.constant([['%d.0' % i for i in xrange(8)]
                                        for _ in xrange(7)]),
                   None))

  def testBostonInputFn(self):
    columns = estimator.infer_real_valued_columns_from_input_fn(
        boston_input_fn)
    self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
                                       columns)

  def testIrisInputFn(self):
    columns = estimator.infer_real_valued_columns_from_input_fn(iris_input_fn)
    self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
                                       columns)
class ReplicaDeviceSetterTest(test.TestCase):
  """Verifies device placement chosen by estimator._get_replica_device_setter."""

  def _tf_config_env(self, tf_config):
    # Context manager exposing tf_config via the TF_CONFIG env variable so
    # RunConfig picks up the cluster spec.
    return test.mock.patch.dict('os.environ',
                                {'TF_CONFIG': json.dumps(tf_config)})

  def testVariablesAreOnPs(self):
    tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
    with self._tf_config_env(tf_config):
      config = run_config.RunConfig()

    with ops.device(estimator._get_replica_device_setter(config)):
      v = variables_lib.Variable([1, 2])
      w = variables_lib.Variable([2, 1])
      a = v + w
    # Variables (and their initializers) go to the parameter server; the
    # compute op stays on the worker.
    self.assertDeviceEqual('/job:ps/task:0', v.device)
    self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
    self.assertDeviceEqual('/job:ps/task:0', w.device)
    self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
    self.assertDeviceEqual('/job:worker', a.device)

  def testVariablesAreLocal(self):
    # With no cluster spec, nothing gets an explicit device assignment.
    with ops.device(
        estimator._get_replica_device_setter(run_config.RunConfig())):
      v = variables_lib.Variable([1, 2])
      w = variables_lib.Variable([2, 1])
      a = v + w
    for node in (v, v.initializer, w, w.initializer, a):
      self.assertDeviceEqual('', node.device)

  def testMutableHashTableIsOnPs(self):
    tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
    with self._tf_config_env(tf_config):
      config = run_config.RunConfig()

    with ops.device(estimator._get_replica_device_setter(config)):
      missing_value = constant_op.constant([-1, -1], dtypes.int64)
      table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
                                      missing_value)
      keys = constant_op.constant(['brain', 'salad', 'tank'])
      values = table.lookup(keys)
    self.assertDeviceEqual('/job:ps/task:0', table.resource_handle.device)
    self.assertDeviceEqual('/job:ps/task:0', values.device)

  def testMutableHashTableIsLocal(self):
    with ops.device(
        estimator._get_replica_device_setter(run_config.RunConfig())):
      missing_value = constant_op.constant([-1, -1], dtypes.int64)
      table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
                                      missing_value)
      keys = constant_op.constant(['brain', 'salad', 'tank'])
      values = table.lookup(keys)
    self.assertDeviceEqual('', table.resource_handle.device)
    self.assertDeviceEqual('', values.device)

  def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
    tf_config = {
        'cluster': {
            run_config.TaskType.PS: ['fake_ps_0']
        },
        'task': {
            'type': run_config.TaskType.WORKER,
            'index': 3
        }
    }
    with self._tf_config_env(tf_config):
      config = run_config.RunConfig()

    with ops.device(estimator._get_replica_device_setter(config)):
      v = variables_lib.Variable([1, 2])
      w = variables_lib.Variable([2, 1])
      a = v + w
    self.assertDeviceEqual('/job:ps/task:0', v.device)
    self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
    self.assertDeviceEqual('/job:ps/task:0', w.device)
    self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
    # The task index from TF_CONFIG is reflected in the worker device.
    self.assertDeviceEqual('/job:worker/task:3', a.device)
# Run the module's test cases when executed as a script.
if __name__ == '__main__':
  test.main()
| [
"tensorflow.contrib.testing.python.framework.util_test.latest_events",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.contrib.learn.Estimator",
"tensorflow.contrib.learn.python.learn.estimators.estimator.SKCompat",
"tensorflow.contrib.learn.python.learn.estimators.estimator.GraphRewriteSpec",
"nump... | [((3461, 3479), 'tensorflow.contrib.learn.python.learn.datasets.base.load_boston', 'base.load_boston', ([], {}), '()\n', (3477, 3479), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((3775, 3791), 'tensorflow.contrib.learn.python.learn.datasets.base.load_iris', 'base.load_iris', ([], {}), '()\n', (3789, 3791), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((4026, 4042), 'tensorflow.contrib.learn.python.learn.datasets.base.load_iris', 'base.load_iris', ([], {}), '()\n', (4040, 4042), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((4290, 4308), 'tensorflow.contrib.learn.python.learn.datasets.base.load_boston', 'base.load_boston', ([], {}), '()\n', (4306, 4308), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((5050, 5102), 'tensorflow.contrib.learn.python.learn.models.linear_regression_zero_init', 'models.linear_regression_zero_init', (['features', 'labels'], {}), '(features, labels)\n', (5084, 5102), False, 'from tensorflow.contrib.learn.python.learn import models\n'), ((5629, 5681), 'tensorflow.contrib.learn.python.learn.models.linear_regression_zero_init', 'models.linear_regression_zero_init', (['features', 'labels'], {}), '(features, labels)\n', (5663, 5681), False, 'from tensorflow.contrib.learn.python.learn import models\n'), ((6117, 6169), 'tensorflow.contrib.learn.python.learn.models.linear_regression_zero_init', 'models.linear_regression_zero_init', (['features', 'labels'], {}), '(features, labels)\n', (6151, 6169), False, 'from tensorflow.contrib.learn.python.learn import models\n'), ((6322, 6411), 'tensorflow.contrib.learn.python.learn.estimators.model_fn.ModelFnOps', 'model_fn.ModelFnOps', ([], {'mode': 'mode', 'predictions': 'prediction', 'loss': 'loss', 'train_op': 'train_op'}), '(mode=mode, predictions=prediction, loss=loss, train_op=\n train_op)\n', (6341, 6411), False, 'from 
tensorflow.contrib.learn.python.learn.estimators import model_fn\n'), ((6553, 6587), 'tensorflow.python.ops.array_ops.one_hot', 'array_ops.one_hot', (['labels', '(3)', '(1)', '(0)'], {}), '(labels, 3, 1, 0)\n', (6570, 6587), False, 'from tensorflow.python.ops import array_ops\n'), ((6610, 6664), 'tensorflow.contrib.learn.python.learn.models.logistic_regression_zero_init', 'models.logistic_regression_zero_init', (['features', 'labels'], {}), '(features, labels)\n', (6646, 6664), False, 'from tensorflow.contrib.learn.python.learn import models\n'), ((7376, 7415), 'tensorflow.contrib.learn.python.learn.estimators.linear.LinearRegressor', 'linear.LinearRegressor', (['feature_columns'], {}), '(feature_columns)\n', (7398, 7415), False, 'from tensorflow.contrib.learn.python.learn.estimators import linear\n'), ((7474, 7541), 'tensorflow.contrib.layers.python.layers.feature_column.create_feature_spec_for_parsing', 'feature_column_lib.create_feature_spec_for_parsing', (['feature_columns'], {}), '(feature_columns)\n', (7524, 7541), True, 'from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib\n'), ((7570, 7629), 'tensorflow.contrib.learn.python.learn.utils.input_fn_utils.build_parsing_serving_input_fn', 'input_fn_utils.build_parsing_serving_input_fn', (['feature_spec'], {}), '(feature_spec)\n', (7615, 7629), False, 'from tensorflow.contrib.learn.python.learn.utils import input_fn_utils\n'), ((9902, 9958), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'resource_constant_model_fn'}), '(model_fn=resource_constant_model_fn)\n', (9921, 9958), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((10016, 10083), 'tensorflow.contrib.layers.python.layers.feature_column.create_feature_spec_for_parsing', 'feature_column_lib.create_feature_spec_for_parsing', (['feature_columns'], {}), '(feature_columns)\n', (10066, 10083), True, 'from 
tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib\n'), ((10112, 10171), 'tensorflow.contrib.learn.python.learn.utils.input_fn_utils.build_parsing_serving_input_fn', 'input_fn_utils.build_parsing_serving_input_fn', (['feature_spec'], {}), '(feature_spec)\n', (10157, 10171), False, 'from tensorflow.contrib.learn.python.learn.utils import input_fn_utils\n'), ((54486, 54497), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (54495, 54497), False, 'from tensorflow.python.platform import test\n'), ((3671, 3706), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['boston.target'], {}), '(boston.target)\n', (3691, 3706), False, 'from tensorflow.python.framework import constant_op\n'), ((3830, 3861), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['iris.data'], {}), '(iris.data)\n', (3850, 3861), False, 'from tensorflow.python.framework import constant_op\n'), ((3915, 3948), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['iris.target'], {}), '(iris.target)\n', (3935, 3948), False, 'from tensorflow.python.framework import constant_op\n'), ((4081, 4112), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['iris.data'], {}), '(iris.data)\n', (4101, 4112), False, 'from tensorflow.python.framework import constant_op\n'), ((4381, 4414), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['boston.data'], {}), '(boston.data)\n', (4401, 4414), False, 'from tensorflow.python.framework import constant_op\n'), ((4485, 4520), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['boston.target'], {}), '(boston.target)\n', (4505, 4520), False, 'from tensorflow.python.framework import constant_op\n'), ((4548, 4589), 'tensorflow.python.ops.array_ops.concat', 'array_ops.concat', (['[features, features]', '(0)'], {}), '([features, features], 0)\n', (4564, 4589), False, 'from 
tensorflow.python.ops import array_ops\n'), ((4617, 4654), 'tensorflow.python.ops.array_ops.concat', 'array_ops.concat', (['[labels, labels]', '(0)'], {}), '([labels, labels], 0)\n', (4633, 4654), False, 'from tensorflow.python.ops import array_ops\n'), ((5161, 5192), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (5190, 5192), False, 'from tensorflow.python.training import training_util\n'), ((5740, 5771), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (5769, 5771), False, 'from tensorflow.python.training import training_util\n'), ((6228, 6259), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (6257, 6259), False, 'from tensorflow.python.training import training_util\n'), ((6723, 6754), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (6752, 6754), False, 'from tensorflow.python.training import training_util\n'), ((7086, 7102), 'tensorflow.contrib.learn.python.learn.datasets.base.load_iris', 'base.load_iris', ([], {}), '()\n', (7100, 7102), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((7301, 7362), 'tensorflow.contrib.layers.python.layers.feature_column.real_valued_column', 'feature_column_lib.real_valued_column', (['"""feature"""'], {'dimension': '(4)'}), "('feature', dimension=4)\n", (7338, 7362), True, 'from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib\n'), ((7853, 7890), 'os.path.join', 'os.path.join', (['tmpdir', '"""my_vocab_file"""'], {}), "(tmpdir, 'my_vocab_file')\n", (7865, 7890), False, 'import os\n'), ((7908, 7946), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['vocab_file_name'], {'mode': '"""w"""'}), "(vocab_file_name, mode='w')\n", (7919, 7946), False, 'from tensorflow.python.platform import gfile\n'), ((8223, 8274), 
'tensorflow.contrib.learn.python.learn.utils.input_fn_utils.InputFnOps', 'input_fn_utils.InputFnOps', (['features', 'labels', 'inputs'], {}), '(features, labels, inputs)\n', (8248, 8274), False, 'from tensorflow.contrib.learn.python.learn.utils import input_fn_utils\n'), ((8400, 8416), 'tensorflow.contrib.learn.python.learn.datasets.base.load_iris', 'base.load_iris', ([], {}), '()\n', (8414, 8416), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((8615, 8676), 'tensorflow.contrib.layers.python.layers.feature_column.real_valued_column', 'feature_column_lib.real_valued_column', (['"""feature"""'], {'dimension': '(4)'}), "('feature', dimension=4)\n", (8652, 8676), True, 'from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib\n'), ((8955, 8999), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(-1)'], {'dtype': 'dtypes.int64'}), '(-1, dtype=dtypes.int64)\n', (8975, 8999), False, 'from tensorflow.python.framework import constant_op\n'), ((9012, 9101), 'tensorflow.contrib.lookup.MutableHashTable', 'lookup.MutableHashTable', (['dtypes.string', 'dtypes.int64', 'const'], {'name': '"""LookupTableModel"""'}), "(dtypes.string, dtypes.int64, const, name=\n 'LookupTableModel')\n", (9035, 9101), False, 'from tensorflow.contrib import lookup\n'), ((11167, 11203), 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['assert_ops'], {}), '(assert_ops)\n', (11191, 11203), False, 'from tensorflow.python.framework import ops\n'), ((11804, 11826), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {}), '()\n', (11824, 11826), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((12450, 12542), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': '_model_fn', 'params': 'expected_params', 'config': 'expected_config'}), 
'(model_fn=_model_fn, params=expected_params, config=\n expected_config)\n', (12469, 12542), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((12870, 12892), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {}), '()\n', (12890, 12892), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((13669, 13733), 'functools.partial', 'functools.partial', (['_model_fn'], {'foo': 'expected_foo', 'bar': 'expected_bar'}), '(_model_fn, foo=expected_foo, bar=expected_bar)\n', (13686, 13733), False, 'import functools\n'), ((13754, 13852), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'partial_model_fn', 'params': 'expected_params', 'config': 'expected_config'}), '(model_fn=partial_model_fn, params=expected_params,\n config=expected_config)\n', (13773, 13852), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((14147, 14165), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (14163, 14165), False, 'import tempfile\n'), ((14719, 14823), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': '_argument_checker', 'params': 'expected_param', 'model_dir': 'expected_model_dir'}), '(model_fn=_argument_checker, params=expected_param,\n model_dir=expected_model_dir)\n', (14738, 14823), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((15270, 15317), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': '_invalid_model_fn'}), '(model_fn=_invalid_model_fn)\n', (15289, 15317), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((15954, 16001), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': '_invalid_model_fn'}), 
'(model_fn=_invalid_model_fn)\n', (15973, 16001), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((16592, 16639), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': '_invalid_model_fn'}), '(model_fn=_invalid_model_fn)\n', (16611, 16639), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((17657, 17705), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': '_model_fn_scaffold'}), '(model_fn=_model_fn_scaffold)\n', (17676, 17705), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((18518, 18566), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': '_model_fn_scaffold'}), '(model_fn=_model_fn_scaffold)\n', (18537, 18566), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((19637, 19701), 'tensorflow.python.platform.test.mock.Mock', 'test.mock.Mock', ([], {'spec': 'basic_session_run_hooks.CheckpointSaverHook'}), '(spec=basic_session_run_hooks.CheckpointSaverHook)\n', (19651, 19701), False, 'from tensorflow.python.platform import test\n'), ((19767, 19812), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (19786, 19812), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((20378, 20431), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {'tf_random_seed': 'test_random_seed'}), '(tf_random_seed=test_random_seed)\n', (20398, 20431), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((20471, 20531), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 
'linear_model_fn', 'config': 'config'}), '(model_fn=linear_model_fn, config=config)\n', (20490, 20531), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((20787, 20829), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {'model_dir': '"""test_dir"""'}), "(model_dir='test_dir')\n", (20807, 20829), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((20840, 20900), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn', 'config': 'config'}), '(model_fn=linear_model_fn, config=config)\n', (20859, 20900), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((21064, 21106), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {'model_dir': '"""test_dir"""'}), "(model_dir='test_dir')\n", (21084, 21106), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((21117, 21204), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn', 'config': 'config', 'model_dir': '"""test_dir"""'}), "(model_fn=linear_model_fn, config=config, model_dir=\n 'test_dir')\n", (21136, 21204), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((21570, 21592), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {}), '()\n', (21590, 21592), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((21644, 21731), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn', 'model_dir': '"""test_dir"""', 'config': 'config'}), "(model_fn=linear_model_fn, model_dir='test_dir', config=\n config)\n", (21663, 21731), False, 'from 
tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((22579, 22616), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 8]', 'dtype': 'np.int64'}), '(shape=[7, 8], dtype=np.int64)\n', (22586, 22616), True, 'import numpy as np\n'), ((22643, 22665), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 10]'}), '(shape=[7, 10])\n', (22650, 22665), True, 'import numpy as np\n'), ((22690, 22730), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 10]', 'dtype': 'np.float32'}), '(shape=[7, 10], dtype=np.float32)\n', (22697, 22730), True, 'import numpy as np\n'), ((22755, 22777), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 11]'}), '(shape=[7, 11])\n', (22762, 22777), True, 'import numpy as np\n'), ((23290, 23335), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (23309, 23335), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((24289, 24307), 'tensorflow.contrib.learn.python.learn.datasets.base.load_boston', 'base.load_boston', ([], {}), '()\n', (24305, 24307), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((24636, 24654), 'tensorflow.contrib.learn.python.learn.datasets.base.load_boston', 'base.load_boston', ([], {}), '()\n', (24652, 24654), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((24672, 24690), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (24688, 24690), False, 'import tempfile\n'), ((25592, 25648), 'tensorflow.contrib.learn.python.learn.estimators._sklearn.mean_squared_error', '_sklearn.mean_squared_error', (['predictions', 'float64_labels'], {}), '(predictions, float64_labels)\n', (25619, 25648), False, 'from tensorflow.contrib.learn.python.learn.estimators import _sklearn\n'), ((26074, 26092), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (26090, 26092), False, 'import tempfile\n'), ((26103, 26189), 
'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_dir': 'tmpdir', 'model_fn': 'linear_model_fn_with_model_fn_ops'}), '(model_dir=tmpdir, model_fn=\n linear_model_fn_with_model_fn_ops)\n', (26122, 26189), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((26356, 26394), 'tensorflow.python.training.checkpoint_state_pb2.CheckpointState', 'checkpoint_state_pb2.CheckpointState', ([], {}), '()\n', (26392, 26394), False, 'from tensorflow.python.training import checkpoint_state_pb2\n'), ((26399, 26447), 'google.protobuf.text_format.Merge', 'text_format.Merge', (['checkpoint_file_content', 'ckpt'], {}), '(checkpoint_file_content, ckpt)\n', (26416, 26447), False, 'from google.protobuf import text_format\n'), ((26779, 26797), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (26795, 26797), False, 'import tempfile\n'), ((26815, 26849), 'os.path.join', 'os.path.join', (['tmpdir', '"""model_dir1"""'], {}), "(tmpdir, 'model_dir1')\n", (26827, 26849), False, 'import os\n'), ((26861, 26951), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_dir': 'model_dir1', 'model_fn': 'linear_model_fn_with_model_fn_ops'}), '(model_dir=model_dir1, model_fn=\n linear_model_fn_with_model_fn_ops)\n', (26880, 26951), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((27022, 27056), 'os.path.join', 'os.path.join', (['tmpdir', '"""model_dir2"""'], {}), "(tmpdir, 'model_dir2')\n", (27034, 27056), False, 'import os\n'), ((27061, 27095), 'os.renames', 'os.renames', (['model_dir1', 'model_dir2'], {}), '(model_dir1, model_dir2)\n', (27071, 27095), False, 'import os\n'), ((27107, 27197), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_dir': 'model_dir2', 'model_fn': 'linear_model_fn_with_model_fn_ops'}), '(model_dir=model_dir2, model_fn=\n 
linear_model_fn_with_model_fn_ops)\n', (27126, 27197), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((27426, 27444), 'tensorflow.contrib.learn.python.learn.datasets.base.load_boston', 'base.load_boston', ([], {}), '()\n', (27442, 27444), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((27711, 27766), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'logistic_model_no_mode_fn'}), '(model_fn=logistic_model_no_mode_fn)\n', (27730, 27766), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((28158, 28174), 'tensorflow.contrib.learn.python.learn.datasets.base.load_iris', 'base.load_iris', ([], {}), '()\n', (28172, 28174), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((28185, 28240), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'logistic_model_no_mode_fn'}), '(model_fn=logistic_model_no_mode_fn)\n', (28204, 28240), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((28254, 28286), 'itertools.islice', 'itertools.islice', (['iris.data', '(100)'], {}), '(iris.data, 100)\n', (28270, 28286), False, 'import itertools\n'), ((28300, 28334), 'itertools.islice', 'itertools.islice', (['iris.target', '(100)'], {}), '(iris.target, 100)\n', (28316, 28334), False, 'import itertools\n'), ((28475, 28507), 'itertools.islice', 'itertools.islice', (['iris.data', '(100)'], {}), '(iris.data, 100)\n', (28491, 28507), False, 'import itertools\n'), ((28526, 28560), 'itertools.islice', 'itertools.islice', (['iris.target', '(100)'], {}), '(iris.target, 100)\n', (28542, 28560), False, 'import itertools\n'), ((28979, 28995), 'tensorflow.contrib.learn.python.learn.datasets.base.load_iris', 'base.load_iris', ([], {}), '()\n', (28993, 28995), False, 'from tensorflow.contrib.learn.python.learn.datasets import 
base\n'), ((29006, 29061), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'logistic_model_no_mode_fn'}), '(model_fn=logistic_model_no_mode_fn)\n', (29025, 29061), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((29075, 29107), 'itertools.islice', 'itertools.islice', (['iris.data', '(100)'], {}), '(iris.data, 100)\n', (29091, 29107), False, 'import itertools\n'), ((29351, 29367), 'tensorflow.contrib.learn.python.learn.datasets.base.load_iris', 'base.load_iris', ([], {}), '()\n', (29365, 29367), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((29378, 29433), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'logistic_model_no_mode_fn'}), '(model_fn=logistic_model_no_mode_fn)\n', (29397, 29433), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((29447, 29479), 'itertools.islice', 'itertools.islice', (['iris.data', '(100)'], {}), '(iris.data, 100)\n', (29463, 29479), False, 'import itertools\n'), ((29714, 29730), 'tensorflow.contrib.learn.python.learn.datasets.base.load_iris', 'base.load_iris', ([], {}), '()\n', (29728, 29730), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((29741, 29796), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'logistic_model_no_mode_fn'}), '(model_fn=logistic_model_no_mode_fn)\n', (29760, 29796), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((29810, 29841), 'itertools.islice', 'itertools.islice', (['iris.data', '(50)'], {}), '(iris.data, 50)\n', (29826, 29841), False, 'import itertools\n'), ((29983, 30028), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (30002, 
30028), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((30311, 30356), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (30330, 30356), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((30625, 30670), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (30644, 30670), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((30684, 30702), 'tensorflow.contrib.learn.python.learn.datasets.base.load_boston', 'base.load_boston', ([], {}), '()\n', (30700, 30702), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((30967, 31030), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn_with_model_fn_ops'}), '(model_fn=linear_model_fn_with_model_fn_ops)\n', (30986, 31030), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((31044, 31062), 'tensorflow.contrib.learn.python.learn.datasets.base.load_boston', 'base.load_boston', ([], {}), '()\n', (31060, 31062), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((31125, 31173), 'functools.partial', 'functools.partial', (['boston_input_fn'], {'num_epochs': '(1)'}), '(boston_input_fn, num_epochs=1)\n', (31142, 31173), False, 'import functools\n'), ((31551, 31596), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (31570, 31596), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((31775, 31820), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], 
{'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (31794, 31820), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((32181, 32222), 'tensorflow.contrib.learn.Estimator', 'learn.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (32196, 32222), False, 'from tensorflow.contrib import learn\n'), ((32446, 32491), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (32465, 32491), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((33538, 33583), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (33557, 33583), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((33805, 33853), 'tensorflow.contrib.testing.python.framework.util_test.latest_events', 'util_test.latest_events', (["(est.model_dir + '/eval')"], {}), "(est.model_dir + '/eval')\n", (33828, 33853), False, 'from tensorflow.contrib.testing.python.framework import util_test\n'), ((34674, 34719), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (34693, 34719), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((34934, 34982), 'tensorflow.contrib.testing.python.framework.util_test.latest_events', 'util_test.latest_events', (["(est.model_dir + '/eval')"], {}), "(est.model_dir + '/eval')\n", (34957, 34982), False, 'from tensorflow.contrib.testing.python.framework import util_test\n'), ((35472, 35517), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (35491, 35517), False, 'from 
tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((36043, 36061), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (36059, 36061), False, 'import tempfile\n'), ((36253, 36291), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['extra_file_name'], {'mode': '"""w"""'}), "(extra_file_name, mode='w')\n", (36264, 36291), False, 'from tensorflow.python.platform import gfile\n'), ((39495, 39526), 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['tmpdir'], {}), '(tmpdir)\n', (39518, 39526), False, 'from tensorflow.python.platform import gfile\n'), ((39591, 39609), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (39607, 39609), False, 'import tempfile\n'), ((41185, 41216), 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['tmpdir'], {}), '(tmpdir)\n', (41208, 41216), False, 'from tensorflow.python.platform import gfile\n'), ((41289, 41307), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (41305, 41307), False, 'import tempfile\n'), ((41499, 41537), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['extra_file_name'], {'mode': '"""w"""'}), "(extra_file_name, mode='w')\n", (41510, 41537), False, 'from tensorflow.python.platform import gfile\n'), ((46588, 46619), 'tensorflow.python.platform.gfile.DeleteRecursively', 'gfile.DeleteRecursively', (['tmpdir'], {}), '(tmpdir)\n', (46611, 46619), False, 'from tensorflow.python.platform import gfile\n'), ((50723, 50789), 'tensorflow.contrib.learn.python.learn.estimators.estimator.infer_real_valued_columns_from_input_fn', 'estimator.infer_real_valued_columns_from_input_fn', (['boston_input_fn'], {}), '(boston_input_fn)\n', (50772, 50789), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((50983, 51047), 'tensorflow.contrib.learn.python.learn.estimators.estimator.infer_real_valued_columns_from_input_fn', 'estimator.infer_real_valued_columns_from_input_fn', 
(['iris_input_fn'], {}), '(iris_input_fn)\n', (51032, 51047), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((3552, 3585), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['boston.data'], {}), '(boston.data)\n', (3572, 3585), False, 'from tensorflow.python.framework import constant_op\n'), ((4184, 4217), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['iris.target'], {}), '(iris.target)\n', (4204, 4217), False, 'from tensorflow.python.framework import constant_op\n'), ((6834, 6864), 'tensorflow.python.ops.math_ops.argmax', 'math_ops.argmax', (['prediction', '(1)'], {}), '(prediction, 1)\n', (6849, 6864), False, 'from tensorflow.python.ops import math_ops\n'), ((7196, 7262), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['iris.target'], {'shape': '[150]', 'dtype': 'dtypes.int32'}), '(iris.target, shape=[150], dtype=dtypes.int32)\n', (7216, 7262), False, 'from tensorflow.python.framework import constant_op\n'), ((8053, 8107), 'tensorflow.contrib.lookup.TextFileStringTableInitializer', 'lookup.TextFileStringTableInitializer', (['vocab_file_name'], {}), '(vocab_file_name)\n', (8090, 8107), False, 'from tensorflow.contrib import lookup\n'), ((8171, 8209), 'tensorflow.python.ops.math_ops.to_int64', 'math_ops.to_int64', (["features['feature']"], {}), "(features['feature'])\n", (8188, 8209), False, 'from tensorflow.python.ops import math_ops\n'), ((8510, 8576), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['iris.target'], {'shape': '[150]', 'dtype': 'dtypes.int32'}), '(iris.target, shape=[150], dtype=dtypes.int32)\n', (8530, 8576), False, 'from tensorflow.python.framework import constant_op\n'), ((9255, 9284), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (["['key']"], {}), "(['key'])\n", (9275, 9284), False, 'from tensorflow.python.framework import constant_op\n'), ((9299, 9345), 
'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[42]'], {'dtype': 'dtypes.int64'}), '([42], dtype=dtypes.int64)\n', (9319, 9345), False, 'from tensorflow.python.framework import constant_op\n'), ((9413, 9510), 'tensorflow.contrib.lookup.MutableHashTable', 'lookup.MutableHashTable', (['dtypes.string', 'dtypes.int64', 'const'], {'name': '"""LookupTableTrainingState"""'}), "(dtypes.string, dtypes.int64, const, name=\n 'LookupTableTrainingState')\n", (9436, 9510), False, 'from tensorflow.contrib import lookup\n'), ((9775, 9804), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (["['key']"], {}), "(['key'])\n", (9795, 9804), False, 'from tensorflow.python.framework import constant_op\n'), ((11554, 11582), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['labels'], {}), '(labels)\n', (11574, 11582), False, 'from tensorflow.python.framework import constant_op\n'), ((15033, 15071), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['(42.0)', '"""weight"""'], {}), "(42.0, 'weight')\n", (15055, 15071), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((15576, 15614), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['(42.0)', '"""weight"""'], {}), "(42.0, 'weight')\n", (15598, 15614), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((16307, 16345), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['(42.0)', '"""weight"""'], {}), "(42.0, 'weight')\n", (16329, 16345), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((17930, 17967), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['(1.0)', '"""weight"""'], {}), "(1.0, 'weight')\n", (17952, 17967), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((17986, 18003), 'tensorflow.python.training.saver.Saver', 'saver_lib.Saver', ([], {}), '()\n', (18001, 18003), 
True, 'from tensorflow.python.training import saver as saver_lib\n'), ((18028, 18092), 'tensorflow.python.platform.test.mock.Mock', 'test.mock.Mock', ([], {'wraps': 'real_saver', 'saver_def': 'real_saver.saver_def'}), '(wraps=real_saver, saver_def=real_saver.saver_def)\n', (18042, 18092), False, 'from tensorflow.python.platform import test\n'), ((18899, 18989), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.string', 'shape': '[None]', 'name': '"""input_example_tensor"""'}), "(dtype=dtypes.string, shape=[None], name=\n 'input_example_tensor')\n", (18920, 18989), False, 'from tensorflow.python.ops import array_ops\n'), ((19045, 19130), 'tensorflow.contrib.learn.python.learn.utils.input_fn_utils.InputFnOps', 'input_fn_utils.InputFnOps', (['features', 'labels', "{'examples': serialized_tf_example}"], {}), "(features, labels, {'examples': serialized_tf_example}\n )\n", (19070, 19130), False, 'from tensorflow.contrib.learn.python.learn.utils import input_fn_utils\n'), ((19180, 19217), 'os.path.join', 'os.path.join', (['est.model_dir', '"""export"""'], {}), "(est.model_dir, 'export')\n", (19192, 19217), False, 'import os\n'), ((21412, 21504), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn', 'config': 'config', 'model_dir': '"""different_dir"""'}), "(model_fn=linear_model_fn, config=config, model_dir=\n 'different_dir')\n", (21431, 21504), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((21884, 21952), 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['tempfile', '"""mkdtemp"""'], {'return_value': '"""temp_dir"""'}), "(tempfile, 'mkdtemp', return_value='temp_dir')\n", (21906, 21952), False, 'from tensorflow.python.platform import test\n'), ((21966, 22011), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 
'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (21985, 22011), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((22178, 22223), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (22197, 22223), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((22311, 22350), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 8]', 'dtype': 'np.float32'}), '(shape=[7, 8], dtype=np.float32)\n', (22318, 22350), True, 'import numpy as np\n'), ((22378, 22416), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 10]', 'dtype': 'np.int32'}), '(shape=[7, 10], dtype=np.int32)\n', (22385, 22416), True, 'import numpy as np\n'), ((24337, 24382), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (24356, 24382), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((24729, 24796), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn', 'model_dir': 'output_dir'}), '(model_fn=linear_model_fn, model_dir=output_dir)\n', (24748, 24796), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((25183, 25250), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn', 'model_dir': 'output_dir'}), '(model_fn=linear_model_fn, model_dir=output_dir)\n', (25202, 25250), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((26309, 26343), 'os.path.join', 'os.path.join', (['tmpdir', '"""checkpoint"""'], {}), "(tmpdir, 'checkpoint')\n", (26321, 26343), False, 'import os\n'), ((27483, 27572), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 
'estimator.Estimator', ([], {'model_fn': 'linear_model_params_fn', 'params': "{'learning_rate': 0.01}"}), "(model_fn=linear_model_params_fn, params={\n 'learning_rate': 0.01})\n", (27502, 27572), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((29122, 29133), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (29130, 29133), True, 'import numpy as np\n'), ((32659, 32697), 'tensorflow.contrib.testing.python.framework.util_test.latest_events', 'util_test.latest_events', (['est.model_dir'], {}), '(est.model_dir)\n', (32682, 32697), False, 'from tensorflow.contrib.testing.python.framework import util_test\n'), ((33230, 33410), 'tensorflow.contrib.metrics.python.ops.metric_ops.streaming_mean_squared_error', 'metric_ops.streaming_mean_squared_error', (['predictions', 'labels'], {'weights': 'weights', 'metrics_collections': 'metrics_collections', 'updates_collections': 'updates_collections', 'name': 'name'}), '(predictions, labels, weights=\n weights, metrics_collections=metrics_collections, updates_collections=\n updates_collections, name=name)\n', (33269, 33410), False, 'from tensorflow.contrib.metrics.python.ops import metric_ops\n'), ((34456, 34620), 'tensorflow.contrib.metrics.python.ops.metric_ops.streaming_mean_tensor', 'metric_ops.streaming_mean_tensor', (['predictions'], {'weights': 'weights', 'metrics_collections': 'metrics_collections', 'updates_collections': 'updates_collections', 'name': 'name'}), '(predictions, weights=weights,\n metrics_collections=metrics_collections, updates_collections=\n updates_collections, name=name)\n', (34488, 34620), False, 'from tensorflow.contrib.metrics.python.ops import metric_ops\n'), ((35721, 35764), 'tensorflow.python.platform.test.mock.patch.object', 'test.mock.patch.object', (['estimator', '"""export"""'], {}), "(estimator, 'export')\n", (35743, 35764), False, 'from tensorflow.python.platform import test\n'), ((35868, 35913), 
'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (35887, 35913), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((36177, 36200), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['tmpdir'], {}), '(tmpdir)\n', (36192, 36200), False, 'from tensorflow.python.util import compat\n'), ((36202, 36234), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""my_extra_file"""'], {}), "('my_extra_file')\n", (36217, 36234), False, 'from tensorflow.python.util import compat\n'), ((36474, 36497), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['tmpdir'], {}), '(tmpdir)\n', (36489, 36497), False, 'from tensorflow.python.util import compat\n'), ((36499, 36524), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""export"""'], {}), "('export')\n", (36514, 36524), False, 'from tensorflow.python.util import compat\n'), ((36657, 36686), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['export_dir_base'], {}), '(export_dir_base)\n', (36669, 36686), False, 'from tensorflow.python.platform import gfile\n'), ((36708, 36732), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['export_dir'], {}), '(export_dir)\n', (36720, 36732), False, 'from tensorflow.python.platform import gfile\n'), ((37764, 37799), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['VOCAB_FILE_CONTENT'], {}), '(VOCAB_FILE_CONTENT)\n', (37779, 37799), False, 'from tensorflow.python.util import compat\n'), ((38050, 38077), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (38065, 38077), False, 'from tensorflow.python.util import compat\n'), ((38087, 38151), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets.extra/some/sub/directory/my_extra_file"""'], {}), "('assets.extra/some/sub/directory/my_extra_file')\n", 
(38102, 38151), False, 'from tensorflow.python.util import compat\n'), ((38322, 38355), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['expected_extra_path'], {}), '(expected_extra_path)\n', (38334, 38355), False, 'from tensorflow.python.platform import gfile\n'), ((38387, 38422), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['EXTRA_FILE_CONTENT'], {}), '(EXTRA_FILE_CONTENT)\n', (38402, 38422), False, 'from tensorflow.python.util import compat\n'), ((38539, 38562), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['tmpdir'], {}), '(tmpdir)\n', (38554, 38562), False, 'from tensorflow.python.util import compat\n'), ((38564, 38596), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""my_vocab_file"""'], {}), "('my_vocab_file')\n", (38579, 38596), False, 'from tensorflow.python.util import compat\n'), ((39727, 39750), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['tmpdir'], {}), '(tmpdir)\n', (39742, 39750), False, 'from tensorflow.python.util import compat\n'), ((39752, 39777), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""export"""'], {}), "('export')\n", (39767, 39777), False, 'from tensorflow.python.util import compat\n'), ((39874, 39903), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['export_dir_base'], {}), '(export_dir_base)\n', (39886, 39903), False, 'from tensorflow.python.platform import gfile\n'), ((39925, 39949), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['export_dir'], {}), '(export_dir)\n', (39937, 39949), False, 'from tensorflow.python.platform import gfile\n'), ((41423, 41446), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['tmpdir'], {}), '(tmpdir)\n', (41438, 41446), False, 'from tensorflow.python.util import compat\n'), ((41448, 41480), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""my_extra_file"""'], {}), "('my_extra_file')\n", (41463, 41480), False, 'from tensorflow.python.util import 
compat\n'), ((41720, 41743), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['tmpdir'], {}), '(tmpdir)\n', (41735, 41743), False, 'from tensorflow.python.util import compat\n'), ((41745, 41770), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""export"""'], {}), "('export')\n", (41760, 41770), False, 'from tensorflow.python.util import compat\n'), ((42137, 42166), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['export_dir_base'], {}), '(export_dir_base)\n', (42149, 42166), False, 'from tensorflow.python.platform import gfile\n'), ((42188, 42212), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['export_dir'], {}), '(export_dir)\n', (42200, 42212), False, 'from tensorflow.python.platform import gfile\n'), ((43244, 43279), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['VOCAB_FILE_CONTENT'], {}), '(VOCAB_FILE_CONTENT)\n', (43259, 43279), False, 'from tensorflow.python.util import compat\n'), ((43530, 43557), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (43545, 43557), False, 'from tensorflow.python.util import compat\n'), ((43567, 43631), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets.extra/some/sub/directory/my_extra_file"""'], {}), "('assets.extra/some/sub/directory/my_extra_file')\n", (43582, 43631), False, 'from tensorflow.python.util import compat\n'), ((43802, 43835), 'tensorflow.python.platform.gfile.Exists', 'gfile.Exists', (['expected_extra_path'], {}), '(expected_extra_path)\n', (43814, 43835), False, 'from tensorflow.python.platform import gfile\n'), ((43867, 43902), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['EXTRA_FILE_CONTENT'], {}), '(EXTRA_FILE_CONTENT)\n', (43882, 43902), False, 'from tensorflow.python.util import compat\n'), ((44019, 44042), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['tmpdir'], {}), '(tmpdir)\n', (44034, 44042), False, 'from 
tensorflow.python.util import compat\n'), ((44044, 44076), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""my_vocab_file"""'], {}), "('my_vocab_file')\n", (44059, 44076), False, 'from tensorflow.python.util import compat\n'), ((46787, 46839), 'tensorflow.contrib.learn.python.learn.estimators.estimator.infer_real_valued_columns_from_input', 'estimator.infer_real_valued_columns_from_input', (['None'], {}), '(None)\n', (46833, 46839), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((47522, 47559), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 8]', 'dtype': 'np.int32'}), '(shape=[7, 8], dtype=np.int32)\n', (47529, 47559), True, 'import numpy as np\n'), ((47996, 48033), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 8]', 'dtype': 'np.int64'}), '(shape=[7, 8], dtype=np.int64)\n', (48003, 48033), True, 'import numpy as np\n'), ((48472, 48511), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 8]', 'dtype': 'np.float32'}), '(shape=[7, 8], dtype=np.float32)\n', (48479, 48511), True, 'import numpy as np\n'), ((48958, 48997), 'numpy.ones', 'np.ones', ([], {'shape': '[7, 8]', 'dtype': 'np.float64'}), '(shape=[7, 8], dtype=np.float64)\n', (48965, 48997), True, 'import numpy as np\n'), ((51451, 51473), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {}), '()\n', (51471, 51473), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((51552, 51582), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['[1, 2]'], {}), '([1, 2])\n', (51574, 51582), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((51593, 51623), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['[2, 1]'], {}), '([2, 1])\n', (51615, 51623), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((52074, 52104), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['[1, 2]'], {}), '([1, 
2])\n', (52096, 52104), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((52115, 52145), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['[2, 1]'], {}), '([2, 1])\n', (52137, 52145), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((52613, 52635), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {}), '()\n', (52633, 52635), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((52724, 52768), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[-1, -1]', 'dtypes.int64'], {}), '([-1, -1], dtypes.int64)\n', (52744, 52768), False, 'from tensorflow.python.framework import constant_op\n'), ((52783, 52848), 'tensorflow.contrib.lookup.MutableHashTable', 'lookup.MutableHashTable', (['dtypes.string', 'dtypes.int64', 'default_val'], {}), '(dtypes.string, dtypes.int64, default_val)\n', (52806, 52848), False, 'from tensorflow.contrib import lookup\n'), ((52870, 52918), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (["['brain', 'salad', 'tank']"], {}), "(['brain', 'salad', 'tank'])\n", (52890, 52918), False, 'from tensorflow.python.framework import constant_op\n'), ((53250, 53294), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[-1, -1]', 'dtypes.int64'], {}), '([-1, -1], dtypes.int64)\n', (53270, 53294), False, 'from tensorflow.python.framework import constant_op\n'), ((53309, 53374), 'tensorflow.contrib.lookup.MutableHashTable', 'lookup.MutableHashTable', (['dtypes.string', 'dtypes.int64', 'default_val'], {}), '(dtypes.string, dtypes.int64, default_val)\n', (53332, 53374), False, 'from tensorflow.contrib import lookup\n'), ((53396, 53444), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (["['brain', 'salad', 'tank']"], {}), "(['brain', 'salad', 'tank'])\n", (53416, 53444), False, 'from 
tensorflow.python.framework import constant_op\n'), ((53963, 53985), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {}), '()\n', (53983, 53985), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((54064, 54094), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['[1, 2]'], {}), '([1, 2])\n', (54086, 54094), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((54105, 54135), 'tensorflow.python.ops.variables.Variable', 'variables_lib.Variable', (['[2, 1]'], {}), '([2, 1])\n', (54127, 54135), True, 'from tensorflow.python.ops import variables as variables_lib\n'), ((7135, 7188), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['iris.data'], {'dtype': 'dtypes.float32'}), '(iris.data, dtype=dtypes.float32)\n', (7155, 7188), False, 'from tensorflow.python.framework import constant_op\n'), ((8449, 8502), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['iris.data'], {'dtype': 'dtypes.float32'}), '(iris.data, dtype=dtypes.float32)\n', (8469, 8502), False, 'from tensorflow.python.framework import constant_op\n'), ((9131, 9162), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (9160, 9162), False, 'from tensorflow.python.training import training_util\n'), ((9615, 9684), 'tensorflow.python.ops.control_flow_ops.group', 'control_flow_ops.group', (['train_op_1', 'training_op_2', 'update_global_step'], {}), '(train_op_1, training_op_2, update_global_step)\n', (9637, 9684), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((10922, 11013), 'tensorflow.python.ops.check_ops.assert_equal', 'check_ops.assert_equal', (['expected_features[k]', 'actual_features[k]'], {'name': "('assert_%s' % k)"}), "(expected_features[k], actual_features[k], name=\n 'assert_%s' % k)\n", (10944, 11013), False, 'from tensorflow.python.ops 
import check_ops\n'), ((11067, 11143), 'tensorflow.python.ops.check_ops.assert_equal', 'check_ops.assert_equal', (['expected_labels', 'actual_labels'], {'name': '"""assert_labels"""'}), "(expected_labels, actual_labels, name='assert_labels')\n", (11089, 11143), False, 'from tensorflow.python.ops import check_ops\n'), ((11276, 11301), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {}), '(0.0)\n', (11296, 11301), False, 'from tensorflow.python.framework import constant_op\n'), ((11315, 11340), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {}), '(0.0)\n', (11335, 11340), False, 'from tensorflow.python.framework import constant_op\n'), ((11480, 11503), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['v'], {}), '(v)\n', (11500, 11503), False, 'from tensorflow.python.framework import constant_op\n'), ((14595, 14620), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {}), '(0.0)\n', (14615, 14620), False, 'from tensorflow.python.framework import constant_op\n'), ((14621, 14646), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {}), '(0.0)\n', (14641, 14646), False, 'from tensorflow.python.framework import constant_op\n'), ((15156, 15202), 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[update_global_step]'], {}), '([update_global_step])\n', (15180, 15202), False, 'from tensorflow.python.framework import ops\n'), ((15722, 15768), 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[update_global_step]'], {}), '([update_global_step])\n', (15746, 15768), False, 'from tensorflow.python.framework import ops\n'), ((16453, 16499), 'tensorflow.python.framework.ops.control_dependencies', 'ops.control_dependencies', (['[update_global_step]'], {}), '([update_global_step])\n', (16477, 16499), False, 'from 
tensorflow.python.framework import ops\n'), ((18478, 18507), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[1.0]]'], {}), '([[1.0]])\n', (18498, 18507), False, 'from tensorflow.python.framework import constant_op\n'), ((19418, 19463), 'tensorflow.contrib.learn.python.learn.estimators.estimator.Estimator', 'estimator.Estimator', ([], {'model_fn': 'linear_model_fn'}), '(model_fn=linear_model_fn)\n', (19437, 19463), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((24205, 24230), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (24225, 24230), False, 'from tensorflow.python.framework import constant_op\n'), ((28339, 28362), 'tensorflow.contrib.learn.python.learn.estimators.estimator.SKCompat', 'estimator.SKCompat', (['est'], {}), '(est)\n', (28357, 28362), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((28580, 28603), 'tensorflow.contrib.learn.python.learn.estimators.estimator.SKCompat', 'estimator.SKCompat', (['est'], {}), '(est)\n', (28598, 28603), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((29857, 29868), 'numpy.int32', 'np.int32', (['v'], {}), '(v)\n', (29865, 29868), True, 'import numpy as np\n'), ((31508, 31539), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (31528, 31539), False, 'from tensorflow.python.framework import constant_op\n'), ((33475, 33514), 'tensorflow.python.summary.summary.histogram', 'summary.histogram', (['"""histogram"""', 'metrics'], {}), "('histogram', metrics)\n", (33492, 33514), False, 'from tensorflow.python.summary import summary\n'), ((35390, 35430), 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.LOSSES'], {}), '(ops.GraphKeys.LOSSES)\n', (35408, 35430), False, 'from tensorflow.python.framework import ops\n'), ((38713, 38745), 
'tensorflow.python.client.session.Session', 'session_lib.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (38732, 38745), True, 'from tensorflow.python.client import session as session_lib\n'), ((38763, 38817), 'tensorflow.python.saved_model.loader.load', 'loader.load', (['sess', '[tag_constants.SERVING]', 'export_dir'], {}), '(sess, [tag_constants.SERVING], export_dir)\n', (38774, 38817), False, 'from tensorflow.python.saved_model import loader\n'), ((40750, 40782), 'tensorflow.python.client.session.Session', 'session_lib.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (40769, 40782), True, 'from tensorflow.python.client import session as session_lib\n'), ((40800, 40854), 'tensorflow.python.saved_model.loader.load', 'loader.load', (['sess', '[tag_constants.SERVING]', 'export_dir'], {}), '(sess, [tag_constants.SERVING], export_dir)\n', (40811, 40854), False, 'from tensorflow.python.saved_model import loader\n'), ((44245, 44277), 'tensorflow.python.client.session.Session', 'session_lib.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (44264, 44277), True, 'from tensorflow.python.client import session as session_lib\n'), ((44295, 44330), 'tensorflow.python.saved_model.loader.load', 'loader.load', (['sess', 'tags', 'export_dir'], {}), '(sess, tags, export_dir)\n', (44306, 44330), False, 'from tensorflow.python.saved_model import loader\n'), ((45319, 45351), 'tensorflow.python.client.session.Session', 'session_lib.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (45338, 45351), True, 'from tensorflow.python.client import session as session_lib\n'), ((45369, 45404), 'tensorflow.python.saved_model.loader.load', 'loader.load', (['sess', 'tags', 'export_dir'], {}), '(sess, tags, export_dir)\n', (45380, 45404), False, 'from tensorflow.python.saved_model import loader\n'), ((46961, 46986), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (46981, 46986), False, 'from tensorflow.python.framework 
import constant_op\n'), ((47296, 47367), 'tensorflow.python.ops.parsing_ops.FixedLenFeature', 'parsing_ops.FixedLenFeature', ([], {'shape': 'expected_shape', 'dtype': 'expected_dtype'}), '(shape=expected_shape, dtype=expected_dtype)\n', (47323, 47367), False, 'from tensorflow.python.ops import parsing_ops\n'), ((51495, 51539), 'tensorflow.contrib.learn.python.learn.estimators.estimator._get_replica_device_setter', 'estimator._get_replica_device_setter', (['config'], {}), '(config)\n', (51531, 51539), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((52657, 52701), 'tensorflow.contrib.learn.python.learn.estimators.estimator._get_replica_device_setter', 'estimator._get_replica_device_setter', (['config'], {}), '(config)\n', (52693, 52701), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((54007, 54051), 'tensorflow.contrib.learn.python.learn.estimators.estimator._get_replica_device_setter', 'estimator._get_replica_device_setter', (['config'], {}), '(config)\n', (54043, 54051), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((11528, 11551), 'six.iteritems', 'six.iteritems', (['features'], {}), '(features)\n', (11541, 11551), False, 'import six\n'), ((15099, 15130), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (15128, 15130), False, 'from tensorflow.python.training import training_util\n'), ((15665, 15696), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (15694, 15696), False, 'from tensorflow.python.training import training_util\n'), ((16396, 16427), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (16425, 16427), False, 'from tensorflow.python.training import training_util\n'), ((17026, 17074), 'functools.partial', 'functools.partial', (['boston_input_fn'], {'num_epochs': 
'(1)'}), '(boston_input_fn, num_epochs=1)\n', (17043, 17074), False, 'import functools\n'), ((17448, 17473), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {}), '(0.0)\n', (17468, 17473), False, 'from tensorflow.python.framework import constant_op\n'), ((17489, 17514), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {}), '(0.0)\n', (17509, 17514), False, 'from tensorflow.python.framework import constant_op\n'), ((17600, 17644), 'tensorflow.python.training.monitored_session.Scaffold', 'monitored_session.Scaffold', ([], {'init_fn': '_init_fn'}), '(init_fn=_init_fn)\n', (17626, 17644), False, 'from tensorflow.python.training import monitored_session\n'), ((18181, 18210), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[1.0]]'], {}), '([[1.0]])\n', (18201, 18210), False, 'from tensorflow.python.framework import constant_op\n'), ((18226, 18251), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(0.0)'], {}), '(0.0)\n', (18246, 18251), False, 'from tensorflow.python.framework import constant_op\n'), ((18337, 18386), 'tensorflow.python.training.monitored_session.Scaffold', 'monitored_session.Scaffold', ([], {'saver': 'self.mock_saver'}), '(saver=self.mock_saver)\n', (18363, 18386), False, 'from tensorflow.python.training import monitored_session\n'), ((18439, 18468), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[1.0]]'], {}), '([[1.0]])\n', (18459, 18468), False, 'from tensorflow.python.framework import constant_op\n'), ((20263, 20286), 'tensorflow.python.framework.ops.get_default_graph', 'ops.get_default_graph', ([], {}), '()\n', (20284, 20286), False, 'from tensorflow.python.framework import ops\n'), ((20307, 20336), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[[1.0]]'], {}), '([[1.0]])\n', (20327, 20336), False, 'from tensorflow.python.framework import 
constant_op\n'), ((20337, 20364), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {}), '([1.0])\n', (20357, 20364), False, 'from tensorflow.python.framework import constant_op\n'), ((28817, 28840), 'tensorflow.contrib.learn.python.learn.estimators.estimator.SKCompat', 'estimator.SKCompat', (['est'], {}), '(est)\n', (28835, 28840), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((31467, 31498), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (31487, 31498), False, 'from tensorflow.python.framework import constant_op\n'), ((36819, 36846), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (36834, 36846), False, 'from tensorflow.python.util import compat\n'), ((36864, 36897), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""saved_model.pb"""'], {}), "('saved_model.pb')\n", (36879, 36897), False, 'from tensorflow.python.util import compat\n'), ((36986, 37013), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (37001, 37013), False, 'from tensorflow.python.util import compat\n'), ((37015, 37043), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""variables"""'], {}), "('variables')\n", (37030, 37043), False, 'from tensorflow.python.util import compat\n'), ((37132, 37159), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (37147, 37159), False, 'from tensorflow.python.util import compat\n'), ((37177, 37221), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""variables/variables.index"""'], {}), "('variables/variables.index')\n", (37192, 37221), False, 'from tensorflow.python.util import compat\n'), ((37310, 37337), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (37325, 37337), 
False, 'from tensorflow.python.util import compat\n'), ((37355, 37413), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""variables/variables.data-00000-of-00001"""'], {}), "('variables/variables.data-00000-of-00001')\n", (37370, 37413), False, 'from tensorflow.python.util import compat\n'), ((37503, 37530), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (37518, 37530), False, 'from tensorflow.python.util import compat\n'), ((37532, 37557), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets"""'], {}), "('assets')\n", (37547, 37557), False, 'from tensorflow.python.util import compat\n'), ((37646, 37673), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (37661, 37673), False, 'from tensorflow.python.util import compat\n'), ((37691, 37730), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets/my_vocab_file"""'], {}), "('assets/my_vocab_file')\n", (37706, 37730), False, 'from tensorflow.python.util import compat\n'), ((38238, 38265), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (38253, 38265), False, 'from tensorflow.python.util import compat\n'), ((38267, 38298), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets.extra"""'], {}), "('assets.extra')\n", (38282, 38298), False, 'from tensorflow.python.util import compat\n'), ((38667, 38678), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (38676, 38678), False, 'from tensorflow.python.framework import ops\n'), ((40036, 40063), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (40051, 40063), False, 'from tensorflow.python.util import compat\n'), ((40081, 40114), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""saved_model.pb"""'], {}), "('saved_model.pb')\n", (40096, 40114), False, 'from 
tensorflow.python.util import compat\n'), ((40203, 40230), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (40218, 40230), False, 'from tensorflow.python.util import compat\n'), ((40232, 40260), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""variables"""'], {}), "('variables')\n", (40247, 40260), False, 'from tensorflow.python.util import compat\n'), ((40349, 40376), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (40364, 40376), False, 'from tensorflow.python.util import compat\n'), ((40394, 40438), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""variables/variables.index"""'], {}), "('variables/variables.index')\n", (40409, 40438), False, 'from tensorflow.python.util import compat\n'), ((40527, 40554), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (40542, 40554), False, 'from tensorflow.python.util import compat\n'), ((40572, 40630), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""variables/variables.data-00000-of-00001"""'], {}), "('variables/variables.data-00000-of-00001')\n", (40587, 40630), False, 'from tensorflow.python.util import compat\n'), ((40704, 40715), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (40713, 40715), False, 'from tensorflow.python.framework import ops\n'), ((41940, 41981), 'tensorflow.contrib.learn.python.learn.estimators.estimator.GraphRewriteSpec', 'estimator.GraphRewriteSpec', (["['tag_1']", '[]'], {}), "(['tag_1'], [])\n", (41966, 41981), False, 'from tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((41995, 42065), 'tensorflow.contrib.learn.python.learn.estimators.estimator.GraphRewriteSpec', 'estimator.GraphRewriteSpec', (["['tag_2', 'tag_3']", "['strip_unused_nodes']"], {}), "(['tag_2', 'tag_3'], ['strip_unused_nodes'])\n", (42021, 42065), False, 'from 
tensorflow.contrib.learn.python.learn.estimators import estimator\n'), ((42299, 42326), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (42314, 42326), False, 'from tensorflow.python.util import compat\n'), ((42344, 42377), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""saved_model.pb"""'], {}), "('saved_model.pb')\n", (42359, 42377), False, 'from tensorflow.python.util import compat\n'), ((42466, 42493), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (42481, 42493), False, 'from tensorflow.python.util import compat\n'), ((42495, 42523), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""variables"""'], {}), "('variables')\n", (42510, 42523), False, 'from tensorflow.python.util import compat\n'), ((42612, 42639), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (42627, 42639), False, 'from tensorflow.python.util import compat\n'), ((42657, 42701), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""variables/variables.index"""'], {}), "('variables/variables.index')\n", (42672, 42701), False, 'from tensorflow.python.util import compat\n'), ((42790, 42817), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (42805, 42817), False, 'from tensorflow.python.util import compat\n'), ((42835, 42893), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""variables/variables.data-00000-of-00001"""'], {}), "('variables/variables.data-00000-of-00001')\n", (42850, 42893), False, 'from tensorflow.python.util import compat\n'), ((42983, 43010), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (42998, 43010), False, 'from tensorflow.python.util import compat\n'), ((43012, 43037), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets"""'], {}), 
"('assets')\n", (43027, 43037), False, 'from tensorflow.python.util import compat\n'), ((43126, 43153), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (43141, 43153), False, 'from tensorflow.python.util import compat\n'), ((43171, 43210), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets/my_vocab_file"""'], {}), "('assets/my_vocab_file')\n", (43186, 43210), False, 'from tensorflow.python.util import compat\n'), ((43718, 43745), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (43733, 43745), False, 'from tensorflow.python.util import compat\n'), ((43747, 43778), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets.extra"""'], {}), "('assets.extra')\n", (43762, 43778), False, 'from tensorflow.python.util import compat\n'), ((44199, 44210), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (44208, 44210), False, 'from tensorflow.python.framework import ops\n'), ((45273, 45284), 'tensorflow.python.framework.ops.Graph', 'ops.Graph', ([], {}), '()\n', (45282, 45284), False, 'from tensorflow.python.framework import ops\n'), ((47757, 47805), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', ([], {'shape': '[7, 8]', 'dtype': 'dtypes.int32'}), '(shape=[7, 8], dtype=dtypes.int32)\n', (47771, 47805), False, 'from tensorflow.python.ops import array_ops\n'), ((48231, 48279), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', ([], {'shape': '[7, 8]', 'dtype': 'dtypes.int64'}), '(shape=[7, 8], dtype=dtypes.int64)\n', (48245, 48279), False, 'from tensorflow.python.ops import array_ops\n'), ((48713, 48763), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', ([], {'shape': '[7, 8]', 'dtype': 'dtypes.float32'}), '(shape=[7, 8], dtype=dtypes.float32)\n', (48727, 48763), False, 'from tensorflow.python.ops import array_ops\n'), ((49199, 49249), 'tensorflow.python.ops.array_ops.ones', 
'array_ops.ones', ([], {'shape': '[7, 8]', 'dtype': 'dtypes.float64'}), '(shape=[7, 8], dtype=dtypes.float64)\n', (49213, 49249), False, 'from tensorflow.python.ops import array_ops\n'), ((51406, 51427), 'json.dumps', 'json.dumps', (['tf_config'], {}), '(tf_config)\n', (51416, 51427), False, 'import json\n'), ((52038, 52060), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {}), '()\n', (52058, 52060), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((52568, 52589), 'json.dumps', 'json.dumps', (['tf_config'], {}), '(tf_config)\n', (52578, 52589), False, 'import json\n'), ((53204, 53226), 'tensorflow.contrib.learn.python.learn.estimators.run_config.RunConfig', 'run_config.RunConfig', ([], {}), '()\n', (53224, 53226), False, 'from tensorflow.contrib.learn.python.learn.estimators import run_config\n'), ((53918, 53939), 'json.dumps', 'json.dumps', (['tf_config'], {}), '(tf_config)\n', (53928, 53939), False, 'import json\n'), ((11358, 11389), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (11387, 11389), False, 'from tensorflow.python.training import training_util\n'), ((14661, 14692), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (14690, 14692), False, 'from tensorflow.python.training import training_util\n'), ((38448, 38480), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['expected_extra_path'], {}), '(expected_extra_path)\n', (38459, 38480), False, 'from tensorflow.python.platform import gfile\n'), ((39332, 39352), 'tensorflow.python.util.compat.as_str_any', 'compat.as_str_any', (['x'], {}), '(x)\n', (39349, 39352), False, 'from tensorflow.python.util import compat\n'), ((43928, 43960), 'tensorflow.python.platform.gfile.GFile', 'gfile.GFile', (['expected_extra_path'], {}), '(expected_extra_path)\n', (43939, 43960), False, 'from 
tensorflow.python.platform import gfile\n'), ((49848, 49908), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(False)'], {'shape': '[7, 8]', 'dtype': 'dtypes.bool'}), '(False, shape=[7, 8], dtype=dtypes.bool)\n', (49868, 49908), False, 'from tensorflow.python.framework import constant_op\n'), ((17534, 17565), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (17563, 17565), False, 'from tensorflow.python.training import training_util\n'), ((18271, 18302), 'tensorflow.python.training.training_util.get_global_step', 'training_util.get_global_step', ([], {}), '()\n', (18300, 18302), False, 'from tensorflow.python.training import training_util\n'), ((49582, 49591), 'six.moves.xrange', 'xrange', (['(7)'], {}), '(7)\n', (49588, 49591), False, 'from six.moves import xrange\n'), ((50216, 50225), 'six.moves.xrange', 'xrange', (['(7)'], {}), '(7)\n', (50222, 50225), False, 'from six.moves import xrange\n'), ((37901, 37928), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (37916, 37928), False, 'from tensorflow.python.util import compat\n'), ((37950, 37989), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets/my_vocab_file"""'], {}), "('assets/my_vocab_file')\n", (37965, 37989), False, 'from tensorflow.python.util import compat\n'), ((43381, 43408), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['export_dir'], {}), '(export_dir)\n', (43396, 43408), False, 'from tensorflow.python.util import compat\n'), ((43430, 43469), 'tensorflow.python.util.compat.as_bytes', 'compat.as_bytes', (['"""assets/my_vocab_file"""'], {}), "('assets/my_vocab_file')\n", (43445, 43469), False, 'from tensorflow.python.util import compat\n'), ((49562, 49571), 'six.moves.xrange', 'xrange', (['(8)'], {}), '(8)\n', (49568, 49571), False, 'from six.moves import xrange\n'), ((50196, 50205), 'six.moves.xrange', 'xrange', 
(['(8)'], {}), '(8)\n', (50202, 50205), False, 'from six.moves import xrange\n'), ((50635, 50644), 'six.moves.xrange', 'xrange', (['(7)'], {}), '(7)\n', (50641, 50644), False, 'from six.moves import xrange\n'), ((50579, 50588), 'six.moves.xrange', 'xrange', (['(8)'], {}), '(8)\n', (50585, 50588), False, 'from six.moves import xrange\n')] |
#!/usr/bin/python
# BSD 3-Clause License
# Copyright (c) 2019, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import cv2
def resize_image(height, width, image, interpolation=None):
    """Resize an image to (height, width) with OpenCV.

    Inputs: height, width -- target size in pixels
            image -- image to resize
            interpolation -- cv2 interpolation flag; when None, nearest
                neighbour is used for integer/boolean images (keeps label
                values intact) and linear interpolation for everything else
    Outputs: the resized image"""
    if interpolation is None:
        uses_discrete_values = str(image.dtype).startswith(("int", "bool"))
        interpolation = cv2.INTER_NEAREST if uses_discrete_values else cv2.INTER_LINEAR
    return cv2.resize(image, dsize=(width, height), interpolation=interpolation)
class ResizeAndCrop(object):
    """Resize and/or crop images before processing and undo both afterwards.

    All geometry (resize target and crop indices) is derived once from the
    hyper-parameter dict in ``__init__``; ``preprocess_image`` applies it and
    ``postprocess_image`` inverts it on the resulting prediction map.
    """

    def __init__(self, hypes, original_image_size):
        """Derive resize/crop parameters from the hyper-parameters.

        Inputs: hypes -- hyper-parameter dict; the relevant keys may live
                    either at the top level or inside hypes['jitter']:
                    'resize_image' (the legacy misspelling 'reseize_image'
                    is also honoured), 'image_width'/'image_height',
                    'crop_for_processing' and the fractional crop bounds
                    'crop_x_from'/'crop_x_to'/'crop_y_from'/'crop_y_to'
                original_image_size -- size of the incoming images; only
                    the first two entries are used
        Defines: crop_y_from, crop_y_to, crop_x_from, crop_x_to (when
                 cropping), inter_image_size, processing_image_size"""

        def _get(h, field):
            """Return h[field] when present, else False."""
            if field in h.keys():
                return h[field]
            return False

        if 'jitter' in hypes.keys():
            h_ = hypes['jitter']
        else:
            h_ = hypes

        # ------------- resize_image -----------------------
        # 'reseize_image' is accepted for backward compatibility with old
        # hypes files that contain the misspelled key.
        self.resize_image = _get(h_, 'reseize_image') or _get(h_, 'resize_image')
        if self.resize_image:
            inter_image_size = (h_['image_width'], h_['image_height'])
        else:
            inter_image_size = original_image_size[:2]
        self.inter_image_size = inter_image_size

        # ------------- crop_for_processing -----------------------
        self.crop_for_processing = _get(h_, 'crop_for_processing')
        if self.crop_for_processing:
            # Fractional bounds -> absolute pixel indices; missing bounds
            # default to the full image extent.
            if 'crop_x_from' in h_.keys():
                self.crop_x_from = int(inter_image_size[0] * h_['crop_x_from'])
            else:
                self.crop_x_from = int(0)
            # Bug fix: this previously tested 'crop_x_to' in
            # hypes['jitter'].keys(), which raised a KeyError whenever the
            # hypes dict had no 'jitter' section.
            if 'crop_x_to' in h_.keys():
                self.crop_x_to = int(inter_image_size[0] * h_['crop_x_to'])
            else:
                self.crop_x_to = int(inter_image_size[0])
            if 'crop_y_from' in h_.keys():
                self.crop_y_from = int(inter_image_size[1] * h_['crop_y_from'])
            else:
                self.crop_y_from = int(0)
            if 'crop_y_to' in h_.keys():
                self.crop_y_to = int(inter_image_size[1] * h_['crop_y_to'])
            else:
                self.crop_y_to = int(inter_image_size[1])
            self.processing_image_size = (
                self.crop_x_to - self.crop_x_from, self.crop_y_to - self.crop_y_from)
        else:
            self.processing_image_size = inter_image_size

    def preprocess_image(self, image, image_uncropped=None):
        """Resize and/or crop an image for processing.

        Inputs: image -- image to process
                image_uncropped -- reusable full-size 2-D buffer for
                    postprocessing (allocated here when None)
        Outputs: (preprocessed_image, image_uncropped)"""
        preprocessed_image = image
        # Resize first, so that the crop indices (computed for
        # inter_image_size) apply to the resized image.
        if self.resize_image:
            preprocessed_image = resize_image(self.inter_image_size[1],  # image_height
                                              self.inter_image_size[0],  # image_width
                                              image)
        # Crop the (possibly resized) image.
        if self.crop_for_processing:
            if image_uncropped is None:
                # Single-channel buffer: postprocessing pastes a 2-D
                # prediction map back into it, not the input image itself.
                image_uncropped = np.zeros(
                    (preprocessed_image.shape[0], preprocessed_image.shape[1]))
            preprocessed_image = preprocessed_image[self.crop_y_from:self.crop_y_to,
                                                    self.crop_x_from:self.crop_x_to]
        return preprocessed_image, image_uncropped

    def postprocess_image(self, image,
                          output_image_uncropped,
                          resulting_image_for_shape,  # only .shape is used
                          filter_data=None):
        """Undo crop/resize on a prediction map and binarise it.

        Inputs: image -- processed (cropped) prediction map
                output_image_uncropped -- full-size buffer the cropped
                    prediction is pasted back into
                resulting_image_for_shape -- image whose shape is the
                    resize-back target (only its shape is read)
                filter_data -- threshold for float predictions
                    (defaults to 0.5)
        Outputs: way_prediction -- full-size hard prediction map"""
        # Insert the cropped prediction into the full-sized buffer.
        if self.crop_for_processing:
            output_image_uncropped[self.crop_y_from:self.crop_y_to,
                                   self.crop_x_from:self.crop_x_to] = image
            image = output_image_uncropped
        # Resize the prediction back to its original size.
        if self.resize_image:
            image = resize_image(resulting_image_for_shape.shape[0],
                                 resulting_image_for_shape.shape[1], image)
        # Accept every pixel with confidence above the threshold as a
        # positive prediction: this turns a soft float map into a `hard`
        # boolean one. Integer/boolean maps are already hard and are copied.
        if str(image.dtype).startswith("float"):
            if filter_data is None:
                filter_data = 0.5
            way_prediction = image > filter_data
        elif str(image.dtype).startswith("int"):
            way_prediction = image.copy()
        elif str(image.dtype).startswith("bool"):
            way_prediction = image.copy()
        else:
            print(image.dtype)
            assert str(image.dtype).startswith(("float", "int", "bool"))
        return way_prediction
| [
"numpy.zeros",
"cv2.resize"
] | [((2055, 2124), 'cv2.resize', 'cv2.resize', (['image'], {'dsize': '(width, height)', 'interpolation': 'interpolation'}), '(image, dsize=(width, height), interpolation=interpolation)\n', (2065, 2124), False, 'import cv2\n'), ((5232, 5300), 'numpy.zeros', 'np.zeros', (['(preprocessed_image.shape[0], preprocessed_image.shape[1])'], {}), '((preprocessed_image.shape[0], preprocessed_image.shape[1]))\n', (5240, 5300), True, 'import numpy as np\n')] |
#===============================WIMPFuncs.py===================================#
# Created by <NAME> 2020
# Contains all the functions for doing the WIMPy calculations
#==============================================================================#
import numpy as np
from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace
from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid
from numpy import nan, isnan, column_stack, amin, amax, zeros_like
from numpy.linalg import norm
from scipy.special import erf
import Params
from Params import m_p_keV, c_km, seconds2year, m_p_kg, GeV_2_kg, c_cm, Jan1
#==============================================================================#
#-------------------- Energy-Time dependent recoil rate------------------------#
#---------------------------------- v_min -------------------------------------#
def MinimumWIMPSpeed(E_r,A,m_chi,delta=0):
    """Minimum WIMP speed (km/s) able to produce a nuclear recoil of energy E_r.

    E_r   = recoil energy in keVr
    A     = nucleus mass number
    m_chi = WIMP mass in GeV
    delta = mass splitting for inelastic scattering (0 = elastic)

    NOTE(review): an identical function is re-defined further down this file
    (shadowing this one); one of the two copies should be removed.
    """
    # Fix: the reduced WIMP-proton mass was computed here but never used.
    m_N_keV = A*m_p_keV # nucleus mass in keV
    mu_N_keV = 1.0e6*m_chi*m_N_keV/(1.0e6*m_chi + m_N_keV) # reduced nucleus mass
    v_min = sqrt(1.0/(2*m_N_keV*E_r))*(m_N_keV*E_r/mu_N_keV + delta)*c_km
    return v_min
#---------------------------------- E_max -------------------------------------#
def MaxWIMPEnergy(A,m_chi,v_lab=245.6,v_esc=533.0):
    """Largest kinematically allowed recoil energy for this target and WIMP.

    A     = nucleus mass number
    m_chi = WIMP mass in GeV
    v_lab = lab velocity in km/s
    v_esc = galactic escape speed in km/s
    """
    m_N = m_p_keV*A                                # nucleus mass in keV
    mu_N = 1.0e6*m_N*m_chi/(1.0e6*m_chi+m_N)       # reduced nucleus mass in keV
    # Fastest possible WIMP in the lab frame, in units of c:
    v_max_over_c = (v_esc+v_lab)/c_km
    return 2.0*mu_N*mu_N*2.0*v_max_over_c**2.0/m_N
def MinimumWIMPSpeed(E_r,A,m_chi,delta=0):
    # NOTE(review): duplicate definition -- this re-defines (and shadows) the
    # MinimumWIMPSpeed defined earlier in this file; the two differ only in
    # that the earlier copy also computed an unused reduced proton mass.
    # One of the two copies should be removed.
    # Returns the minimum WIMP speed (km/s) that can produce a recoil E_r.
    # E_r = recoil energy in keVr
    # A = nucleus mass number
    # m_chi = WIMP mass in GeV
    # delta = for inelastic scattering
    m_N_keV = A*m_p_keV # nucleus mass in keV
    mu_N_keV = 1.0e6*m_chi*m_N_keV/(1.0e6*m_chi + m_N_keV) # reduced nucleus mass
    v_min = sqrt(1.0/(2*m_N_keV*E_r))*(m_N_keV*E_r/mu_N_keV + delta)*c_km
    return v_min
#-------------------- Mean Inverse Speed (for Gaussian f(v)) --------------------------#
def MeanInverseSpeed_SHM(v_min,sig_v=167.0,v_esc=533.0,v_lab=245.6):
    # Mean inverse speed g(v_min) = <1/v> for the Standard Halo Model:
    # a Maxwellian velocity distribution truncated at the escape speed and
    # boosted into the lab frame. Evaluated element-wise on the array v_min.
    # v_min = minimum WIMP speed(s) in km/s
    # sig_v = 1D velocity dispersion in km/s
    # v_esc = galactic escape speed in km/s
    # v_lab = lab velocity in km/s
    # Returns g in units of (km/s)^-1.
    # Normalisation constant of the escape-speed-truncated Maxwellian:
    N_esc = erf(v_esc/(sqrt(2.0)*sig_v))\
            -sqrt(2.0/pi)*(v_esc/sig_v)*exp(-v_esc**2.0/(2.0*sig_v**2.0))
    # Dimensionless speeds in units of the most probable speed v_0:
    v_0 = sig_v*sqrt(2.0)
    x = v_min/v_0
    z = v_esc/v_0
    y = v_lab/v_0
    # Set up conditional terms (g is piecewise in x relative to |y-z|, y+z)
    g = zeros_like(v_min)
    g[(x<abs(y-z))&(z<y)] = (1.0/(v_0*y))
    g2 = (1.0/(2.0*N_esc*v_0*y))*(erf(x+y)-erf(x-y)-(4.0/sqrt(pi))*y*exp(-z**2))
    g3 = (1.0/(2.0*N_esc*v_0*y))*(erf(z)-erf(x-y)-(2.0/sqrt(pi))*(y+z-x)*exp(-z**2))
    # Apply conditions; for x > y+z no halo WIMP is fast enough, so g = 0.
    g[(x<abs(y-z))&(z>y)] = g2[(x<abs(y-z))&(z>y)]
    g[(abs(y-z)<x)&(x<(y+z))] = g3[(abs(y-z)<x)&(x<(y+z))]
    g[(y+z)<x] = 0.0
    return g
#--------------------Helm Form Factor-------------------------------------------#
def C_SI(Nuc):
    """Spin-independent enhancement factor: coherent A^2 scaling."""
    return Nuc.MassNumber**2
def C_SDp(Nuc):
    """Spin-dependent enhancement factor for proton-only coupling:
    (4/3) * (J+1)/J * <S_p>^2."""
    spin_p = Nuc.ExpProtonSpin
    j = Nuc.NuclearSpin
    return (4/3)*((j+1)/j)*spin_p**2
def C_SDn(Nuc):
    """Spin-dependent enhancement factor for neutron-only coupling:
    (4/3) * (J+1)/J * <S_n>^2."""
    spin_n = Nuc.ExpNeutronSpin
    j = Nuc.NuclearSpin
    return (4/3)*((j+1)/j)*spin_n**2
def C_SDpn(Nuc):
    """Spin-dependent enhancement factor with equal proton and neutron
    couplings: (4/3) * (J+1)/J * (<S_p>^2 + <S_n>^2)."""
    spin_p = Nuc.ExpProtonSpin
    spin_n = Nuc.ExpNeutronSpin
    j = Nuc.NuclearSpin
    return (4/3)*((j+1)/j)*(spin_p**2+spin_n**2)
def dRdE(E_r,m_chi,sigma_p,Nuc,NuclearEnhancementFactor,FormFactor,gvmin,rho_0=0.3):
    '''
    Differential recoil rate dR/dE in units of (ton year keVr)^-1.

    * E_r is the recoil energy in keVr, m_chi the WIMP mass in GeV and
    sigma_p the WIMP-proton cross section in cm^2.
    * gvmin should be a function that takes v_min in (km/s) and returns
    g(v_min) in units of (km/s)^-1.
    '''
    mass_number = Nuc.MassNumber
    enhancement = NuclearEnhancementFactor(Nuc)
    # DM-proton reduced mass (in units of keV)
    mu_p = 1.0e6*m_chi*m_p_keV/(1.0e6*m_chi + m_p_keV)
    # Rate amplitude (in units cm kg^-1 s^-2)
    amplitude = (c_cm**2)*((rho_0*1.0e6*enhancement*sigma_p)/(2*m_chi*GeV_2_kg*mu_p**2))
    # Mean inverse speed at the kinematic threshold, converted to cm^-1 s
    v_thresh = MinimumWIMPSpeed(E_r,mass_number,m_chi)
    g_inv_speed = gvmin(v_thresh)/(1000.0*100.0)
    # Rate = amplitude * mean inverse speed * form factor squared
    rate = amplitude*g_inv_speed*FormFactor(E_r,mass_number)**2.0
    # Convert from per-second-per-kg to (ton-year-keV)^-1
    return rate*seconds2year*1000.0
def BinnedWIMPRate(E_th,E_max,ne,m_vals,Nuc,NuclearEnhancementFactor,FormFactor,gvmin,**kwargs):
    """Binned recoil-energy spectrum for a set of WIMP masses.

    Returns an array of shape (len(m_vals), ne): the expected rate in each
    of ne logarithmic energy bins between E_th and E_max (keV), with each
    row normalised to the total rate integrated up to the kinematic maximum
    (the cross section is fixed at 1e-45 cm^2).
    """
    n_m = size(m_vals)
    edges = logspace(log10(E_th),log10(E_max),ne+1)
    R = zeros(shape=(n_m,ne))
    for im in range(0,n_m):
        m_chi = m_vals[im]
        # Total rate over a fine grid up to the kinematic endpoint
        E_top = MaxWIMPEnergy(Nuc.MassNumber,m_chi,**kwargs)
        E_fine = logspace(log10(E_th),log10(E_top),1000)
        R_total = trapz(dRdE(E_fine,m_chi,1.0e-45,Nuc,NuclearEnhancementFactor,FormFactor,gvmin,**kwargs),E_fine)
        # Trapezoid-rule weight in each bin, then rescale to R_total
        dR = dRdE(edges,m_chi,1.0e-45,Nuc,NuclearEnhancementFactor,FormFactor,gvmin,**kwargs)
        bin_rates = 0.5*(edges[1:]-edges[0:-1])*(dR[1:]+dR[0:-1])
        R[im,:] = R_total*bin_rates/sum(bin_rates)
    return R
| [
"numpy.size",
"numpy.zeros_like",
"numpy.zeros",
"scipy.special.erf",
"numpy.exp",
"numpy.log10",
"numpy.sqrt"
] | [((2588, 2605), 'numpy.zeros_like', 'zeros_like', (['v_min'], {}), '(v_min)\n', (2598, 2605), False, 'from numpy import nan, isnan, column_stack, amin, amax, zeros_like\n'), ((4594, 4606), 'numpy.size', 'size', (['m_vals'], {}), '(m_vals)\n', (4598, 4606), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((4667, 4688), 'numpy.zeros', 'zeros', ([], {'shape': '(nm, ne)'}), '(shape=(nm, ne))\n', (4672, 4688), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2484, 2493), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (2488, 2493), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((4628, 4639), 'numpy.log10', 'log10', (['E_th'], {}), '(E_th)\n', (4633, 4639), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((4640, 4652), 'numpy.log10', 'log10', (['E_max'], {}), '(E_max)\n', (4645, 4652), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((1287, 1318), 'numpy.sqrt', 'sqrt', (['(1.0 / (2 * m_N_keV * E_r))'], {}), '(1.0 / (2 * m_N_keV * E_r))\n', (1291, 1318), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2099, 2130), 'numpy.sqrt', 'sqrt', (['(1.0 / (2 * m_N_keV * E_r))'], {}), '(1.0 / (2 * m_N_keV * E_r))\n', (2103, 2130), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2419, 2460), 'numpy.exp', 'exp', (['(-v_esc ** 2.0 / (2.0 * sig_v ** 2.0))'], {}), '(-v_esc ** 2.0 / (2.0 * sig_v ** 2.0))\n', (2422, 2460), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((4806, 4817), 'numpy.log10', 'log10', (['E_th'], {}), '(E_th)\n', (4811, 4817), False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((4818, 4832), 'numpy.log10', 'log10', (['E_r_max'], {}), '(E_r_max)\n', (4823, 4832), 
False, 'from numpy import cos, sin, arctan2, arccos, trapz, ones, log10, ndim, meshgrid\n'), ((2392, 2406), 'numpy.sqrt', 'sqrt', (['(2.0 / pi)'], {}), '(2.0 / pi)\n', (2396, 2406), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2682, 2692), 'scipy.special.erf', 'erf', (['(x + y)'], {}), '(x + y)\n', (2685, 2692), False, 'from scipy.special import erf\n'), ((2691, 2701), 'scipy.special.erf', 'erf', (['(x - y)'], {}), '(x - y)\n', (2694, 2701), False, 'from scipy.special import erf\n'), ((2717, 2729), 'numpy.exp', 'exp', (['(-z ** 2)'], {}), '(-z ** 2)\n', (2720, 2729), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2763, 2769), 'scipy.special.erf', 'erf', (['z'], {}), '(z)\n', (2766, 2769), False, 'from scipy.special import erf\n'), ((2770, 2780), 'scipy.special.erf', 'erf', (['(x - y)'], {}), '(x - y)\n', (2773, 2780), False, 'from scipy.special import erf\n'), ((2802, 2814), 'numpy.exp', 'exp', (['(-z ** 2)'], {}), '(-z ** 2)\n', (2805, 2814), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2360, 2369), 'numpy.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (2364, 2369), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2705, 2713), 'numpy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (2709, 2713), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n'), ((2784, 2792), 'numpy.sqrt', 'sqrt', (['pi'], {}), '(pi)\n', (2788, 2792), False, 'from numpy import pi, sqrt, exp, zeros, size, shape, array, linspace, logspace\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
import pycocotools.coco as coco
import math
class EXDetDataset(data.Dataset):
  """Dataset producing ExtremeNet-style training targets from COCO annotations.

  For each image it renders one heatmap per extreme-point type
  (top / left / bottom / right) plus a centre heatmap, together with the
  sub-pixel offset regression targets, flat heatmap indices and a
  validity mask consumed by the detection loss.
  """
  def _coco_box_to_bbox(self, box):
    # Convert a COCO [x, y, w, h] box into [x1, y1, x2, y2] corners.
    bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)
    return bbox
  def _get_border(self, border, size):
    # Shrink the crop border (by powers of two) until the valid range
    # [border // i, size - border // i] for the crop centre is non-empty.
    i = 1
    while size - border // i <= border // i:
      i *= 2
    return border // i
  def __getitem__(self, index):
    """Load one image and build its ground-truth tensors.

    Returns a dict with the normalised input image, the five heatmaps
    and, when ``opt.reg_offset`` is set, the per-object offset / index /
    mask regression targets.
    """
    img_id = self.images[index]
    img_info = self.coco.loadImgs(ids=[img_id])[0]
    img_path = os.path.join(self.img_dir, img_info['file_name'])
    img = cv2.imread(img_path)
    height, width = img.shape[0], img.shape[1]
    # c, s: centre and scale of the square crop taken from the image
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
    s = max(img.shape[0], img.shape[1]) * 1.0
    flipped = False
    if self.split == 'train':
      if not self.opt.not_rand_crop:
        # random scale plus a random crop centre kept away from the borders
        s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
        w_border = self._get_border(128, img.shape[1])
        h_border = self._get_border(128, img.shape[0])
        c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
        c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
      else:
        # gaussian scale / shift jitter, clipped to a sane range
        sf = self.opt.scale
        cf = self.opt.shift
        s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
        c[0] += img.shape[1] * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
        c[1] += img.shape[0] * np.clip(np.random.randn() * cf, -2 * cf, 2 * cf)
      if np.random.random() < self.opt.flip:
        flipped = True
        img = img[:, ::-1, :]
    # warp the crop to the network input resolution and normalise to [0, 1]
    trans_input = get_affine_transform(c, s, 0, [self.opt.input_res, self.opt.input_res])
    inp = cv2.warpAffine(img, trans_input, (self.opt.input_res, self.opt.input_res), flags=cv2.INTER_LINEAR)
    inp = (inp.astype(np.float32) / 255.)
    if self.split == 'train' and not self.opt.no_color_aug:
      color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
    inp = (inp - self.mean) / self.std
    inp = inp.transpose(2, 0, 1)
    output_res = self.opt.output_res
    num_classes = self.opt.num_classes
    trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
    # one heatmap channel per class, or a single class-agnostic channel
    num_hm = 1 if self.opt.agnostic_ex else num_classes
    hm_t = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
    hm_l = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
    hm_b = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
    hm_r = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
    hm_c = np.zeros((num_classes, output_res, output_res), dtype=np.float32)
    # per-object sub-pixel offsets, flat heatmap indices and validity mask
    reg_t = np.zeros((self.max_objs, 2), dtype=np.float32)
    reg_l = np.zeros((self.max_objs, 2), dtype=np.float32)
    reg_b = np.zeros((self.max_objs, 2), dtype=np.float32)
    reg_r = np.zeros((self.max_objs, 2), dtype=np.float32)
    ind_t = np.zeros((self.max_objs), dtype=np.int64)
    ind_l = np.zeros((self.max_objs), dtype=np.int64)
    ind_b = np.zeros((self.max_objs), dtype=np.int64)
    ind_r = np.zeros((self.max_objs), dtype=np.int64)
    reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
    ann_ids = self.coco.getAnnIds(imgIds=[img_id])
    anns = self.coco.loadAnns(ids=ann_ids)
    num_objs = min(len(anns), self.max_objs)
    draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
                    draw_umich_gaussian
    for k in range(num_objs):
      ann = anns[k]
      # bbox = self._coco_box_to_bbox(ann['bbox'])
      # extreme points, ordered top, left, bottom, right (tlbr)
      pts = np.array(ann['extreme_points'], dtype=np.float32).reshape(4, 2)
      # cls_id = int(self.cat_ids[ann['category_id']] - 1) # bug
      cls_id = int(self.cat_ids[ann['category_id']])
      hm_id = 0 if self.opt.agnostic_ex else cls_id
      if flipped:
        # mirror the x coordinates and swap the left/right extreme points
        pts[:, 0] = width - pts[:, 0] - 1
        pts[1], pts[3] = pts[3].copy(), pts[1].copy()
      for j in range(4):
        pts[j] = affine_transform(pts[j], trans_output)
      pts = np.clip(pts, 0, self.opt.output_res - 1)
      h, w = pts[2, 1] - pts[0, 1], pts[3, 0] - pts[1, 0]
      if h > 0 and w > 0:
        radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        radius = max(0, int(radius))
        pt_int = pts.astype(np.int32)
        draw_gaussian(hm_t[hm_id], pt_int[0], radius)
        draw_gaussian(hm_l[hm_id], pt_int[1], radius)
        draw_gaussian(hm_b[hm_id], pt_int[2], radius)
        draw_gaussian(hm_r[hm_id], pt_int[3], radius)
        # the offsets recover the fractional part lost by the int cast
        reg_t[k] = pts[0] - pt_int[0]
        reg_l[k] = pts[1] - pt_int[1]
        reg_b[k] = pts[2] - pt_int[2]
        reg_r[k] = pts[3] - pt_int[3]
        # flat (row-major) indices into the output_res x output_res heatmap
        ind_t[k] = pt_int[0, 1] * output_res + pt_int[0, 0]
        ind_l[k] = pt_int[1, 1] * output_res + pt_int[1, 0]
        ind_b[k] = pt_int[2, 1] * output_res + pt_int[2, 0]
        ind_r[k] = pt_int[3, 1] * output_res + pt_int[3, 0]
        # object centre from the left/right and top/bottom extremes
        ct = [int((pts[3, 0] + pts[1, 0]) / 2), int((pts[0, 1] + pts[2, 1]) / 2)]
        draw_gaussian(hm_c[cls_id], ct, radius)
        reg_mask[k] = 1
    ret = {'input': inp, 'hm_t': hm_t, 'hm_l': hm_l, 'hm_b': hm_b, 'hm_r': hm_r, 'hm_c': hm_c}
    if self.opt.reg_offset:
      ret.update({
        'reg_mask': reg_mask,
        'reg_t': reg_t,
        'reg_l': reg_l,
        'reg_b': reg_b,
        'reg_r': reg_r,
        'ind_t': ind_t,
        'ind_l': ind_l,
        'ind_b': ind_b,
        'ind_r': ind_r
      })
    return ret
| [
"math.ceil",
"numpy.random.randn",
"numpy.zeros",
"utils.image.color_aug",
"numpy.clip",
"cv2.imread",
"cv2.warpAffine",
"numpy.random.randint",
"numpy.array",
"numpy.random.random",
"utils.image.affine_transform",
"utils.image.get_affine_transform",
"numpy.arange",
"os.path.join"
] | [((556, 634), 'numpy.array', 'np.array', (['[box[0], box[1], box[0] + box[2], box[1] + box[3]]'], {'dtype': 'np.float32'}), '([box[0], box[1], box[0] + box[2], box[1] + box[3]], dtype=np.float32)\n', (564, 634), True, 'import numpy as np\n'), ((951, 1000), 'os.path.join', 'os.path.join', (['self.img_dir', "img_info['file_name']"], {}), "(self.img_dir, img_info['file_name'])\n", (963, 1000), False, 'import os\n'), ((1015, 1035), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1025, 1035), False, 'import cv2\n'), ((1100, 1150), 'numpy.array', 'np.array', (['[img.shape[1] / 2.0, img.shape[0] / 2.0]'], {}), '([img.shape[1] / 2.0, img.shape[0] / 2.0])\n', (1108, 1150), True, 'import numpy as np\n'), ((2149, 2220), 'utils.image.get_affine_transform', 'get_affine_transform', (['c', 's', '(0)', '[self.opt.input_res, self.opt.input_res]'], {}), '(c, s, 0, [self.opt.input_res, self.opt.input_res])\n', (2169, 2220), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((2235, 2337), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'trans_input', '(self.opt.input_res, self.opt.input_res)'], {'flags': 'cv2.INTER_LINEAR'}), '(img, trans_input, (self.opt.input_res, self.opt.input_res),\n flags=cv2.INTER_LINEAR)\n', (2249, 2337), False, 'import cv2\n'), ((2705, 2760), 'utils.image.get_affine_transform', 'get_affine_transform', (['c', 's', '(0)', '[output_res, output_res]'], {}), '(c, s, 0, [output_res, output_res])\n', (2725, 2760), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((2837, 2897), 'numpy.zeros', 'np.zeros', (['(num_hm, output_res, output_res)'], {'dtype': 'np.float32'}), '((num_hm, output_res, output_res), dtype=np.float32)\n', (2845, 2897), True, 'import numpy as np\n'), ((2913, 2973), 'numpy.zeros', 'np.zeros', (['(num_hm, output_res, output_res)'], {'dtype': 'np.float32'}), '((num_hm, output_res, output_res), dtype=np.float32)\n', (2921, 2973), True, 'import numpy as np\n'), ((2989, 3049), 
'numpy.zeros', 'np.zeros', (['(num_hm, output_res, output_res)'], {'dtype': 'np.float32'}), '((num_hm, output_res, output_res), dtype=np.float32)\n', (2997, 3049), True, 'import numpy as np\n'), ((3065, 3125), 'numpy.zeros', 'np.zeros', (['(num_hm, output_res, output_res)'], {'dtype': 'np.float32'}), '((num_hm, output_res, output_res), dtype=np.float32)\n', (3073, 3125), True, 'import numpy as np\n'), ((3141, 3206), 'numpy.zeros', 'np.zeros', (['(num_classes, output_res, output_res)'], {'dtype': 'np.float32'}), '((num_classes, output_res, output_res), dtype=np.float32)\n', (3149, 3206), True, 'import numpy as np\n'), ((3223, 3269), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (3231, 3269), True, 'import numpy as np\n'), ((3286, 3332), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (3294, 3332), True, 'import numpy as np\n'), ((3349, 3395), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (3357, 3395), True, 'import numpy as np\n'), ((3412, 3458), 'numpy.zeros', 'np.zeros', (['(self.max_objs, 2)'], {'dtype': 'np.float32'}), '((self.max_objs, 2), dtype=np.float32)\n', (3420, 3458), True, 'import numpy as np\n'), ((3475, 3514), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.int64'}), '(self.max_objs, dtype=np.int64)\n', (3483, 3514), True, 'import numpy as np\n'), ((3533, 3572), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.int64'}), '(self.max_objs, dtype=np.int64)\n', (3541, 3572), True, 'import numpy as np\n'), ((3591, 3630), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.int64'}), '(self.max_objs, dtype=np.int64)\n', (3599, 3630), True, 'import numpy as np\n'), ((3649, 3688), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.int64'}), '(self.max_objs, dtype=np.int64)\n', (3657, 3688), True, 
'import numpy as np\n'), ((3710, 3749), 'numpy.zeros', 'np.zeros', (['self.max_objs'], {'dtype': 'np.uint8'}), '(self.max_objs, dtype=np.uint8)\n', (3718, 3749), True, 'import numpy as np\n'), ((2456, 2516), 'utils.image.color_aug', 'color_aug', (['self._data_rng', 'inp', 'self._eig_val', 'self._eig_vec'], {}), '(self._data_rng, inp, self._eig_val, self._eig_vec)\n', (2465, 2516), False, 'from utils.image import flip, color_aug\n'), ((4675, 4715), 'numpy.clip', 'np.clip', (['pts', '(0)', '(self.opt.output_res - 1)'], {}), '(pts, 0, self.opt.output_res - 1)\n', (4682, 4715), True, 'import numpy as np\n'), ((1517, 1578), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'w_border', 'high': '(img.shape[1] - w_border)'}), '(low=w_border, high=img.shape[1] - w_border)\n', (1534, 1578), True, 'import numpy as np\n'), ((1602, 1663), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'h_border', 'high': '(img.shape[0] - h_border)'}), '(low=h_border, high=img.shape[0] - h_border)\n', (1619, 1663), True, 'import numpy as np\n'), ((2021, 2039), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2037, 2039), True, 'import numpy as np\n'), ((4618, 4656), 'utils.image.affine_transform', 'affine_transform', (['pts[j]', 'trans_output'], {}), '(pts[j], trans_output)\n', (4634, 4656), False, 'from utils.image import get_affine_transform, affine_transform\n'), ((4174, 4223), 'numpy.array', 'np.array', (["ann['extreme_points']"], {'dtype': 'np.float32'}), "(ann['extreme_points'], dtype=np.float32)\n", (4182, 4223), True, 'import numpy as np\n'), ((1342, 1366), 'numpy.arange', 'np.arange', (['(0.6)', '(1.4)', '(0.1)'], {}), '(0.6, 1.4, 0.1)\n', (1351, 1366), True, 'import numpy as np\n'), ((4854, 4866), 'math.ceil', 'math.ceil', (['h'], {}), '(h)\n', (4863, 4866), False, 'import math\n'), ((4868, 4880), 'math.ceil', 'math.ceil', (['w'], {}), '(w)\n', (4877, 4880), False, 'import math\n'), ((1877, 1894), 'numpy.random.randn', 'np.random.randn', ([], {}), 
'()\n', (1892, 1894), True, 'import numpy as np\n'), ((1965, 1982), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1980, 1982), True, 'import numpy as np\n'), ((1786, 1803), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1801, 1803), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import gpflow
import os

plt.style.use('ggplot')

# Toy 1-D regression data: noisy samples of a smooth periodic signal.
N = 12
X = np.random.rand(N, 1)
Y = np.sin(12*X) + 0.66*np.cos(25*X) + np.random.randn(N, 1)*0.1 + 3

# Matern-5/2 GP regression model with small fixed observation noise.
k = gpflow.kernels.Matern52(1, lengthscales=0.3)
m = gpflow.models.GPR(X, Y, kern=k)
m.likelihood.variance = 0.01
m.compile()

def plot(m):
    """Plot the training points and the GP posterior mean +/- 2 std."""
    grid = np.linspace(-0.1, 1.1, 100)[:, None]
    mu, var = m.predict_y(grid)
    band = 2 * np.sqrt(var[:, 0])
    plt.figure(figsize=(12, 6))
    plt.plot(X, Y, 'kx', mew=2)
    plt.plot(grid, mu, 'b', lw=2)
    plt.fill_between(grid[:, 0], mu[:, 0] - band, mu[:, 0] + band,
                     color='blue', alpha=0.2)
    plt.xlim(-0.1, 1.1)

plot(m)
plt.savefig('first_regression.png') | [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"gpflow.models.GPR",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"numpy.random.rand",
"gpflow.kernels.Matern52",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] | [((81, 104), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (94, 104), True, 'import matplotlib.pyplot as plt\n'), ((120, 140), 'numpy.random.rand', 'np.random.rand', (['N', '(1)'], {}), '(N, 1)\n', (134, 140), True, 'import numpy as np\n'), ((216, 260), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', (['(1)'], {'lengthscales': '(0.3)'}), '(1, lengthscales=0.3)\n', (239, 260), False, 'import gpflow\n'), ((266, 297), 'gpflow.models.GPR', 'gpflow.models.GPR', (['X', 'Y'], {'kern': 'k'}), '(X, Y, kern=k)\n', (283, 297), False, 'import gpflow\n'), ((698, 733), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""first_regression.png"""'], {}), "('first_regression.png')\n", (709, 733), True, 'import matplotlib.pyplot as plt\n'), ((441, 468), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (451, 468), True, 'import matplotlib.pyplot as plt\n'), ((474, 501), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""kx"""'], {'mew': '(2)'}), "(X, Y, 'kx', mew=2)\n", (482, 501), True, 'import matplotlib.pyplot as plt\n'), ((507, 536), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'mean', '"""b"""'], {'lw': '(2)'}), "(xx, mean, 'b', lw=2)\n", (515, 536), True, 'import matplotlib.pyplot as plt\n'), ((664, 683), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.1)', '(1.1)'], {}), '(-0.1, 1.1)\n', (672, 683), True, 'import matplotlib.pyplot as plt\n'), ((367, 394), 'numpy.linspace', 'np.linspace', (['(-0.1)', '(1.1)', '(100)'], {}), '(-0.1, 1.1, 100)\n', (378, 394), True, 'import numpy as np\n'), ((145, 159), 'numpy.sin', 'np.sin', (['(12 * X)'], {}), '(12 * X)\n', (151, 159), True, 'import numpy as np\n'), ((180, 201), 'numpy.random.randn', 'np.random.randn', (['N', '(1)'], {}), '(N, 1)\n', (195, 201), True, 'import numpy as np\n'), ((165, 179), 'numpy.cos', 'np.cos', (['(25 * X)'], {}), '(25 * X)\n', (171, 179), True, 'import numpy as np\n'), ((582, 600), 'numpy.sqrt', 'np.sqrt', (['var[:, 
0]'], {}), '(var[:, 0])\n', (589, 600), True, 'import numpy as np\n'), ((615, 633), 'numpy.sqrt', 'np.sqrt', (['var[:, 0]'], {}), '(var[:, 0])\n', (622, 633), True, 'import numpy as np\n')] |
import numpy as np
def wls_eval(y, x, w=None):
    """
    Method to evaluate error using weighted least squares method (WLS)

    :param y: Array to evaluate error; column 0 holds the abscissa used
        to derive the integration steps
    :type y: numpy array (np.ndarray)
    :param x: Array to evaluate error
    :type x: numpy array (np.ndarray)
    :param w: Weight array. If no argument is passed, weights are
        considered to be 1 (LS method)
    :type w: numpy array (np.ndarray)
    :return: WLS error
    :rtype: float
    """
    assert (y.shape == x.shape), "Arrays must have the same shape"
    if w is None:
        w = np.ones((y.shape[0], 1))
    assert w.shape[0] == y.shape[0], \
        "Weight array must have the same number of rows as y and x"
    n_rows = y.shape[0]
    if n_rows == 0:
        # nothing to integrate
        return 0.0
    # Step between consecutive rows of y (column 0 is the abscissa); the
    # last step is repeated so every row has one.
    steps = [round(y[i + 1, 0] - y[i, 0], 5) for i in range(n_rows - 1)]
    # BUG FIX: with a single row the original indexed steps[-1] on an
    # empty list (IndexError); fall back to a zero step instead.
    steps.append(steps[-1] if steps else 0.0)
    # Vectorised equivalent of the original double loop:
    # err = sum_i steps[i] * sum_j (w * (y - x)**2)[i, j]
    weighted_sq = w * (y - x) ** 2
    err = float(np.sum(np.asarray(steps)[:, np.newaxis] * weighted_sq))
    return 0.5 * err
| [
"numpy.ones"
] | [((567, 591), 'numpy.ones', 'np.ones', (['(y.shape[0], 1)'], {}), '((y.shape[0], 1))\n', (574, 591), True, 'import numpy as np\n')] |
# ---- moveable.py -------------------------------------------------------------
import numpy as np
import os
import unittest
import h5py
from psgeom import moveable
def test_translation_matrix_from_vector():
    """A translation matrix applied to a homogeneous point adds the vector."""
    shift = np.random.randint(0, 5, size=3)
    point = np.random.randint(0, 5, size=3)
    # homogeneous representation of `point`
    homog = np.ones(4)
    homog[:3] = point
    T = moveable._translation_matrix_from_vector(shift)
    moved = np.dot(T, homog)
    assert np.all(moved[:3] == point + shift)
    assert moved[3] == 1.0
def test_rotation_matrix_from_angles():
    """Chain 90-degree rotations about z, x and y, checking each step.

    Starting from the unit x vector: Rz(90) maps x -> y, Rx(90) maps
    y -> z, and Ry(-90) should map z back to x.
    """
    x = np.array([1.0, 0.0, 0.0]) # vector pointing at x
    Rz = moveable._rotation_matrix_from_angles(90.0, 0.0, 0.0)
    y = np.dot(Rz, x)
    np.testing.assert_array_almost_equal(y, np.array([0.0, 1.0, 0.0]),
                                         err_msg='Rz err')
    Rx = moveable._rotation_matrix_from_angles(0.0, 0.0, 90.0)
    z = np.dot(Rx, y)
    np.testing.assert_array_almost_equal(z, np.array([0.0, 0.0, 1.0]),
                                         err_msg='Rx err')
    Ry = moveable._rotation_matrix_from_angles(0.0, -90.0, 0.0)
    x = np.dot(Ry, z)
    # BUG FIX: the original asserted assert_array_almost_equal(x, x),
    # which is vacuously true; compare against the expected unit x
    # vector instead, completing the x -> y -> z -> x cycle.
    np.testing.assert_array_almost_equal(x, np.array([1.0, 0.0, 0.0]),
                                         err_msg='Ry err')
def test_angle_retrieval():
    """Euler angles recovered from a rotated frame rebuild the same matrix."""
    for _ in range(100):
        # sample three random angles in [0, 180) degrees
        alpha, beta, gamma = np.random.rand(3) * 180.0
        R = moveable._rotation_matrix_from_angles(gamma, beta, alpha)
        # columns of the rotated identity are the rotated basis vectors
        frame = np.dot(R, np.eye(3))
        gp, bp, ap = moveable._angles_from_rotated_frame(frame[:, 0],
                                                            frame[:, 1],
                                                            frame[:, 2])
        # rebuilding the rotation from the recovered angles must give
        # back the original matrix (checked by the assertion below)
        R2 = moveable._rotation_matrix_from_angles(gp, bp, ap)
assert np.sum( np.abs( R - R2 ) ) < 1e-12 | [
"psgeom.moveable._angles_from_rotated_frame",
"numpy.abs",
"numpy.eye",
"numpy.ones",
"psgeom.moveable._rotation_matrix_from_angles",
"numpy.random.randint",
"numpy.array",
"numpy.random.rand",
"numpy.dot",
"numpy.testing.assert_array_almost_equal",
"psgeom.moveable._translation_matrix_from_vect... | [((226, 257), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {'size': '(3)'}), '(0, 5, size=3)\n', (243, 257), True, 'import numpy as np\n'), ((270, 301), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {'size': '(3)'}), '(0, 5, size=3)\n', (287, 301), True, 'import numpy as np\n'), ((316, 326), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (323, 326), True, 'import numpy as np\n'), ((355, 398), 'psgeom.moveable._translation_matrix_from_vector', 'moveable._translation_matrix_from_vector', (['x'], {}), '(x)\n', (395, 398), False, 'from psgeom import moveable\n'), ((548, 573), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (556, 573), True, 'import numpy as np\n'), ((611, 664), 'psgeom.moveable._rotation_matrix_from_angles', 'moveable._rotation_matrix_from_angles', (['(90.0)', '(0.0)', '(0.0)'], {}), '(90.0, 0.0, 0.0)\n', (648, 664), False, 'from psgeom import moveable\n'), ((673, 686), 'numpy.dot', 'np.dot', (['Rz', 'x'], {}), '(Rz, x)\n', (679, 686), True, 'import numpy as np\n'), ((831, 884), 'psgeom.moveable._rotation_matrix_from_angles', 'moveable._rotation_matrix_from_angles', (['(0.0)', '(0.0)', '(90.0)'], {}), '(0.0, 0.0, 90.0)\n', (868, 884), False, 'from psgeom import moveable\n'), ((893, 906), 'numpy.dot', 'np.dot', (['Rx', 'y'], {}), '(Rx, y)\n', (899, 906), True, 'import numpy as np\n'), ((1051, 1105), 'psgeom.moveable._rotation_matrix_from_angles', 'moveable._rotation_matrix_from_angles', (['(0.0)', '(-90.0)', '(0.0)'], {}), '(0.0, -90.0, 0.0)\n', (1088, 1105), False, 'from psgeom import moveable\n'), ((1114, 1127), 'numpy.dot', 'np.dot', (['Ry', 'z'], {}), '(Ry, z)\n', (1120, 1127), True, 'import numpy as np\n'), ((1132, 1192), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['x', 'x'], {'err_msg': '"""Ry err"""'}), "(x, x, err_msg='Ry err')\n", (1168, 1192), True, 'import numpy as np\n'), ((731, 756), 
'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (739, 756), True, 'import numpy as np\n'), ((951, 976), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (959, 976), True, 'import numpy as np\n'), ((1390, 1447), 'psgeom.moveable._rotation_matrix_from_angles', 'moveable._rotation_matrix_from_angles', (['gamma', 'beta', 'alpha'], {}), '(gamma, beta, alpha)\n', (1427, 1447), False, 'from psgeom import moveable\n'), ((1461, 1470), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1467, 1470), True, 'import numpy as np\n'), ((1484, 1496), 'numpy.dot', 'np.dot', (['R', 'I'], {}), '(R, I)\n', (1490, 1496), True, 'import numpy as np\n'), ((1519, 1584), 'psgeom.moveable._angles_from_rotated_frame', 'moveable._angles_from_rotated_frame', (['Ip[:, 0]', 'Ip[:, 1]', 'Ip[:, 2]'], {}), '(Ip[:, 0], Ip[:, 1], Ip[:, 2])\n', (1554, 1584), False, 'from psgeom import moveable\n'), ((1725, 1774), 'psgeom.moveable._rotation_matrix_from_angles', 'moveable._rotation_matrix_from_angles', (['gp', 'bp', 'ap'], {}), '(gp, bp, ap)\n', (1762, 1774), False, 'from psgeom import moveable\n'), ((461, 474), 'numpy.dot', 'np.dot', (['T', 'yp'], {}), '(T, yp)\n', (467, 474), True, 'import numpy as np\n'), ((1270, 1286), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1284, 1286), True, 'import numpy as np\n'), ((1311, 1327), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1325, 1327), True, 'import numpy as np\n'), ((1352, 1368), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1366, 1368), True, 'import numpy as np\n'), ((422, 435), 'numpy.dot', 'np.dot', (['T', 'yp'], {}), '(T, yp)\n', (428, 435), True, 'import numpy as np\n'), ((1799, 1813), 'numpy.abs', 'np.abs', (['(R - R2)'], {}), '(R - R2)\n', (1805, 1813), True, 'import numpy as np\n')] |
from __future__ import division, print_function
import abc
import numpy as np
from menpo.image import Image
from menpo.feature import sparse_hog
from menpo.visualize import print_dynamic, progress_bar_str
from menpofit.base import noisy_align, build_sampling_grid
from menpofit.fittingresult import (NonParametricFittingResult,
SemiParametricFittingResult,
ParametricFittingResult)
from .base import (NonParametricRegressor, SemiParametricRegressor,
ParametricRegressor)
from .parametricfeatures import extract_parametric_features, weights
from .regressors import mlr
class RegressorTrainer(object):
    r"""
    An abstract base class for training regressors.

    Subclasses define how features are extracted (:meth:`features`), what
    the regression target is (:meth:`delta_ps`) and how the final
    regressor object is assembled (:meth:`_build_regressor`).

    Parameters
    ----------
    reference_shape : :map:`PointCloud`
        The reference shape that will be used.
    regression_type : `callable`, optional
        A `callable` that defines the regression technique to be used.
        Examples of such callables can be found in
        :ref:`regression_callables`
    regression_features : ``None`` or `string` or `function`, optional
        The features that are used during the regression.
    noise_std : `float`, optional
        The standard deviation of the gaussian noise used to produce the
        training shapes.
    rotation : `boolean`, optional
        Specifies whether ground truth in-plane rotation is to be used
        to produce the training shapes.
    n_perturbations : `int`, optional
        Defines the number of perturbations that will be applied to the
        training shapes.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, reference_shape, regression_type=mlr,
                 regression_features=None, noise_std=0.04, rotation=False,
                 n_perturbations=10):
        self.reference_shape = reference_shape
        self.regression_type = regression_type
        self.regression_features = regression_features
        # NOTE(review): ``rotation`` is stored but never forwarded to
        # ``noisy_align`` in ``_perturb_shape`` below -- confirm whether
        # in-plane rotation perturbations are intentionally disabled.
        self.rotation = rotation
        self.noise_std = noise_std
        self.n_perturbations = n_perturbations

    def _regression_data(self, images, gt_shapes, perturbed_shapes,
                         verbose=False):
        r"""
        Method that generates the regression data : features and delta_ps.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images.
        gt_shapes : :map:`PointCloud` list
            List of the ground truth shapes that correspond to the images.
        perturbed_shapes : :map:`PointCloud` list
            List of the perturbed shapes in order to regress.
        verbose : `boolean`, optional
            If ``True``, the progress is printed.
        """
        if verbose:
            print_dynamic('- Generating regression data')
        n_images = len(images)
        features = []
        delta_ps = []
        for j, (i, s, p_shape) in enumerate(zip(images, gt_shapes,
                                                perturbed_shapes)):
            if verbose:
                print_dynamic('- Generating regression data - {}'.format(
                    progress_bar_str((j + 1.) / n_images, show_bar=False)))
            # each image contributes one training sample per perturbation
            for ps in p_shape:
                features.append(self.features(i, ps))
                delta_ps.append(self.delta_ps(s, ps))
        return np.asarray(features), np.asarray(delta_ps)

    @abc.abstractmethod
    def features(self, image, shape):
        r"""
        Abstract method to generate the features for the regression.

        Parameters
        ----------
        image : :map:`MaskedImage`
            The current image.
        shape : :map:`PointCloud`
            The current shape.
        """
        pass

    @abc.abstractmethod
    def delta_ps(self, gt_shape, perturbed_shape):
        r"""
        Abstract method to generate the delta_ps for the regression.

        Parameters
        ----------
        gt_shape : :map:`PointCloud`
            The ground truth shape.
        perturbed_shape : :map:`PointCloud`
            The perturbed shape.
        """
        pass

    def train(self, images, shapes, perturbed_shapes=None, verbose=False,
              **kwargs):
        r"""
        Trains a Regressor given a list of landmarked images.

        Parameters
        ----------
        images : list of :map:`MaskedImage`
            The set of landmarked images from which to train the regressor.
        shapes : :map:`PointCloud` list
            List of the shapes that correspond to the images.
        perturbed_shapes : :map:`PointCloud` list, optional
            List of the perturbed shapes used for the regressor training.
            If ``None``, they are synthesised via :meth:`perturb_shapes`.
        verbose : `boolean`, optional
            Flag that controls information and progress printing.

        Returns
        -------
        regressor : :map:`Regressor`
            A regressor object.

        Raises
        ------
        ValueError
            The number of shapes must be equal to the number of images.
        ValueError
            The number of perturbed shapes must be equal or multiple to
            the number of images.
        """
        n_images = len(images)
        n_shapes = len(shapes)
        # generate regression data
        if n_images != n_shapes:
            raise ValueError("The number of shapes must be equal to "
                             "the number of images.")
        elif not perturbed_shapes:
            # no perturbed shapes provided: synthesise them from the
            # ground truth by noisy alignment
            perturbed_shapes = self.perturb_shapes(shapes)
            features, delta_ps = self._regression_data(
                images, shapes, perturbed_shapes, verbose=verbose)
        elif n_images == len(perturbed_shapes):
            features, delta_ps = self._regression_data(
                images, shapes, perturbed_shapes, verbose=verbose)
        else:
            raise ValueError("The number of perturbed shapes must be "
                             "equal or multiple to the number of images.")
        # perform regression
        if verbose:
            print_dynamic('- Performing regression...')
        # Expected to be a callable
        regressor = self.regression_type(features, delta_ps, **kwargs)
        # compute regressor RMSE over the training samples
        estimated_delta_ps = regressor(features)
        error = np.sqrt(np.mean(np.sum((delta_ps - estimated_delta_ps) ** 2,
                                       axis=1)))
        if verbose:
            print_dynamic('- Regression RMSE is {0:.5f}.\n'.format(error))
        return self._build_regressor(regressor, self.features)

    def perturb_shapes(self, gt_shape):
        r"""
        Perturbs the given shapes. The number of perturbations is defined by
        ``n_perturbations``.

        Parameters
        ----------
        gt_shape : :map:`PointCloud` list
            List of the shapes that correspond to the images.
            will be perturbed.

        Returns
        -------
        perturbed_shapes : :map:`PointCloud` list
            List of the perturbed shapes.
        """
        # n_perturbations independently perturbed copies per shape
        return [[self._perturb_shape(s) for _ in range(self.n_perturbations)]
                for s in gt_shape]

    def _perturb_shape(self, gt_shape):
        r"""
        Method that performs noisy alignment between the given ground truth
        shape and the reference shape.

        Parameters
        ----------
        gt_shape : :map:`PointCloud`
            The ground truth shape.
        """
        return noisy_align(self.reference_shape, gt_shape,
                           noise_std=self.noise_std
                           ).apply(self.reference_shape)

    @abc.abstractmethod
    def _build_regressor(self, regressor, features):
        r"""
        Abstract method to build a regressor model.
        """
        pass
class NonParametricRegressorTrainer(RegressorTrainer):
    r"""
    Class for training a Non-Parametric Regressor.

    Parameters
    ----------
    reference_shape : :map:`PointCloud`
        The reference shape that will be used.
    regression_type : `callable`, optional
        A `callable` that defines the regression technique to be used.
        Examples of such callables can be found in
        :ref:`regression_callables`
    regression_features : `function`, optional
        The features that are used during the regression.
        See `menpo.features` for details more details on
        Menpo's standard image features and feature options.
        See :ref:`feature_functions` for non standard
        features definitions.
    patch_shape : tuple, optional
        The shape of the patches that will be extracted.
    noise_std : `float`, optional
        The standard deviation of the gaussian noise used to produce the
        training shapes.
    rotation : `boolean`, optional
        Specifies whether ground truth in-plane rotation is to be used
        to produce the training shapes.
    n_perturbations : `int`, optional
        Defines the number of perturbations that will be applied to the
        training shapes.
    """
    def __init__(self, reference_shape, regression_type=mlr,
                 regression_features=sparse_hog, patch_shape=(16, 16),
                 noise_std=0.04, rotation=False, n_perturbations=10):
        super(NonParametricRegressorTrainer, self).__init__(
            reference_shape, regression_type=regression_type,
            regression_features=regression_features, noise_std=noise_std,
            rotation=rotation, n_perturbations=n_perturbations)
        self.patch_shape = patch_shape
        self._set_up()

    def _set_up(self):
        # work out feature length per patch by running the feature
        # function once on a blank patch-sized image
        patch_img = Image.init_blank(self.patch_shape, fill=0)
        self._feature_patch_length = self.regression_features(patch_img).n_parameters

    @property
    def algorithm(self):
        r"""
        Returns the algorithm name.
        """
        return "Non-Parametric"

    def _create_fitting(self, image, shapes, gt_shape=None):
        r"""
        Method that creates the fitting result object.

        Parameters
        ----------
        image : :map:`MaskedImage`
            The image object.
        shapes : :map:`PointCloud` list
            The shapes.
        gt_shape : :map:`PointCloud`
            The ground truth shape.
        """
        return NonParametricFittingResult(image, self, parameters=[shapes],
                                          gt_shape=gt_shape)

    def features(self, image, shape):
        r"""
        Method that extracts the features for the regression, which in this
        case are patch based.

        Parameters
        ----------
        image : :map:`MaskedImage`
            The current image.
        shape : :map:`PointCloud`
            The current shape.
        """
        # extract patches
        patches = image.extract_patches(shape, patch_size=self.patch_shape)
        features = np.zeros((shape.n_points, self._feature_patch_length))
        for j, patch in enumerate(patches):
            # compute features
            features[j, ...] = self.regression_features(patch).as_vector()
        # flatten all per-point features into a single vector; the
        # trailing 1 presumably acts as a bias/intercept term for the
        # linear regression -- verify against the regressor implementation
        return np.hstack((features.ravel(), 1))

    def delta_ps(self, gt_shape, perturbed_shape):
        r"""
        Method to generate the delta_ps for the regression.

        Parameters
        ----------
        gt_shape : :map:`PointCloud`
            The ground truth shape.
        perturbed_shape : :map:`PointCloud`
            The perturbed shape.
        """
        return (gt_shape.as_vector() -
                perturbed_shape.as_vector())

    def _build_regressor(self, regressor, features):
        r"""
        Method to build the NonParametricRegressor regressor object.
        """
        return NonParametricRegressor(regressor, features)
class SemiParametricRegressorTrainer(NonParametricRegressorTrainer):
    r"""
    Class for training a Semi-Parametric Regressor.

    This means that a parametric shape model and a non-parametric appearance
    representation are employed.

    Parameters
    ----------
    transform : :map:`Transform`
        The parametric shape transform; its parameter vector is the
        regression target.
    reference_shape : PointCloud
        The reference shape that will be used.
    regression_type : `callable`, optional
        A `callable` that defines the regression technique to be used.
        Examples of such callables can be found in
        :ref:`regression_callables`
    regression_features : `function`, optional
        The features that are used during the regression.
        See :ref:`menpo.features` for details more details on
        Menpos standard image features and feature options.
    patch_shape : tuple, optional
        The shape of the patches that will be extracted.
    update : 'compositional' or 'additive'
        Defines the way to update the warp.
    noise_std : `float`, optional
        The standard deviation of the gaussian noise used to produce the
        training shapes.
    rotation : `boolean`, optional
        Specifies whether ground truth in-plane rotation is to be used
        to produce the training shapes.
    n_perturbations : `int`, optional
        Defines the number of perturbations that will be applied to the
        training shapes.
    """
    def __init__(self, transform, reference_shape, regression_type=mlr,
                 regression_features=sparse_hog, patch_shape=(16, 16),
                 update='compositional', noise_std=0.04, rotation=False,
                 n_perturbations=10):
        super(SemiParametricRegressorTrainer, self).__init__(
            reference_shape, regression_type=regression_type,
            regression_features=regression_features, patch_shape=patch_shape,
            noise_std=noise_std, rotation=rotation,
            n_perturbations=n_perturbations)
        self.transform = transform
        self.update = update

    @property
    def algorithm(self):
        r"""
        Returns the algorithm name.
        """
        return "Semi-Parametric"

    def _create_fitting(self, image, shapes, gt_shape=None):
        r"""
        Method that creates the fitting result object.

        Parameters
        ----------
        image : :map:`MaskedImage`
            The image object.
        shapes : :map:`PointCloud` list
            The shapes.
        gt_shape : :map:`PointCloud`
            The ground truth shape.
        """
        return SemiParametricFittingResult(image, self, parameters=[shapes],
                                           gt_shape=gt_shape)

    def delta_ps(self, gt_shape, perturbed_shape):
        r"""
        Method to generate the delta_ps for the regression.

        Unlike the non-parametric case, the regression target is the
        difference between the *transform parameter* vectors of the two
        shapes, not the raw point coordinates.

        Parameters
        ----------
        gt_shape : :map:`PointCloud`
            The ground truth shape.
        perturbed_shape : :map:`PointCloud`
            The perturbed shape.
        """
        self.transform.set_target(gt_shape)
        gt_ps = self.transform.as_vector()
        self.transform.set_target(perturbed_shape)
        perturbed_ps = self.transform.as_vector()
        return gt_ps - perturbed_ps

    def _build_regressor(self, regressor, features):
        r"""
        Method to build the SemiParametricRegressor regressor object.
        """
        return SemiParametricRegressor(regressor, features, self.transform,
                                       self.update)
class ParametricRegressorTrainer(RegressorTrainer):
    r"""
    Trainer for a fully Parametric Regressor: both the shape and the
    appearance are represented by parametric models.

    Parameters
    ----------
    appearance_model : :map:`PCAModel`
        The appearance model to be used.
    transform : :map:`Affine`
        The transform used for warping.
    reference_shape : :map:`PointCloud`
        The reference shape that will be used.
    regression_type : `callable`, optional
        A `callable` that defines the regression technique to be used.
        Examples of such callables can be found in
        :ref:`regression_callables`
    regression_features : ``None`` or `function`, optional
        The parametric features that are used during the regression.
        If ``None``, the reconstruction appearance weights will be used as
        feature.
        If `string` or `function`, the feature representation will be
        computed using one of the parametric feature functions defined in
        :ref:`parametric_features`.
    update : 'compositional' or 'additive'
        Defines the way to update the warp.
    noise_std : `float`, optional
        The standard deviation of the gaussian noise used to produce the
        training shapes.
    rotation : `boolean`, optional
        Specifies whether ground truth in-plane rotation is to be used
        to produce the training shapes.
    n_perturbations : `int`, optional
        Defines the number of perturbations that will be applied to the
        training shapes.
    """
    def __init__(self, appearance_model, transform, reference_shape,
                 regression_type=mlr, regression_features=weights,
                 update='compositional', noise_std=0.04, rotation=False,
                 n_perturbations=10):
        super(ParametricRegressorTrainer, self).__init__(
            reference_shape, regression_type=regression_type,
            regression_features=regression_features, noise_std=noise_std,
            rotation=rotation, n_perturbations=n_perturbations)
        self.appearance_model = appearance_model
        self.template = appearance_model.mean()
        self.regression_features = regression_features
        self.transform = transform
        self.update = update

    @property
    def algorithm(self):
        r"""
        Name of the trained algorithm.
        """
        return "Parametric"

    def _create_fitting(self, image, shapes, gt_shape=None):
        r"""
        Build the fitting result object for this trainer.

        Parameters
        ----------
        image : :map:`MaskedImage`
            The image object.
        shapes : :map:`PointCloud` list
            The shapes.
        gt_shape : :map:`PointCloud`
            The ground truth shape.
        """
        return ParametricFittingResult(image, self, parameters=[shapes],
                                       gt_shape=gt_shape)

    def features(self, image, shape):
        r"""
        Extract the parametric appearance features for the regression.

        Parameters
        ----------
        image : :map:`MaskedImage`
            The current image.
        shape : :map:`PointCloud`
            The current shape.
        """
        self.transform.set_target(shape)
        # TODO should the template be a mask or a shape? warp_to_shape here
        warped = image.warp_to_mask(self.template.mask, self.transform,
                                    warp_landmarks=False)
        parametric_features = extract_parametric_features(
            self.appearance_model, warped, self.regression_features)
        # Append a constant bias term to the feature vector.
        return np.hstack((parametric_features, 1))

    def delta_ps(self, gt_shape, perturbed_shape):
        r"""
        Compute the transform-parameter increment used as the regression
        target.

        Parameters
        ----------
        gt_shape : :map:`PointCloud`
            The ground truth shape.
        perturbed_shape : :map:`PointCloud`
            The perturbed shape.
        """
        def _params_for(shape):
            # Project the shape onto the transform and read the parameters.
            self.transform.set_target(shape)
            return self.transform.as_vector()

        target_params = _params_for(gt_shape)
        perturbed_params = _params_for(perturbed_shape)
        return target_params - perturbed_params

    def _build_regressor(self, regressor, features):
        r"""
        Wrap the trained regression callable into a
        :map:`ParametricRegressor`.
        """
        return ParametricRegressor(
            regressor, features, self.appearance_model, self.transform,
            self.update)
class SemiParametricClassifierBasedRegressorTrainer(
        SemiParametricRegressorTrainer):
    r"""
    Trainer for a Semi-Parametric Classifier-Based Regressor: per-point
    classifier responses are used in place of image features.

    Parameters
    ----------
    classifiers : list of :map:`classifiers`
        List of classifiers.
    transform : :map:`Affine`
        The transform used for warping.
    reference_shape : :map:`PointCloud`
        The reference shape that will be used.
    regression_type : `callable`, optional
        A `callable` that defines the regression technique to be used.
        Examples of such callables can be found in
        :ref:`regression_callables`
    patch_shape : tuple, optional
        The shape of the patches that will be extracted.
    noise_std : `float`, optional
        The standard deviation of the gaussian noise used to produce the
        training shapes.
    rotation : `boolean`, optional
        Specifies whether ground truth in-plane rotation is to be used
        to produce the training shapes.
    n_perturbations : `int`, optional
        Defines the number of perturbations that will be applied to the
        training shapes.
    """
    def __init__(self, classifiers, transform, reference_shape,
                 regression_type=mlr, patch_shape=(16, 16),
                 update='compositional', noise_std=0.04, rotation=False,
                 n_perturbations=10):
        super(SemiParametricClassifierBasedRegressorTrainer, self).__init__(
            transform, reference_shape, regression_type=regression_type,
            patch_shape=patch_shape, update=update,
            noise_std=noise_std, rotation=rotation,
            n_perturbations=n_perturbations)
        self.classifiers = classifiers

    def _set_up(self):
        # TODO: CLMs should use slices instead of sampling grid, and the
        # need of the _set_up method will probably disappear
        # set up sampling grid
        self.sampling_grid = build_sampling_grid(self.patch_shape)

    def features(self, image, shape):
        r"""
        Extract the classifier responses used as regression features,
        one response per (classifier, patch) pair.

        Parameters
        ----------
        image : :map:`MaskedImage`
            The current image.
        shape : :map:`PointCloud`
            The current shape.
        """
        patches = image.extract_patches(shape, patch_size=self.patch_shape)
        responses = [classifier(patch.as_vector(keep_channels=True))
                     for classifier, patch in zip(self.classifiers, patches)]
        # Flatten all responses and append a constant bias term.
        return np.hstack((np.asarray(responses).ravel(), 1))
| [
"numpy.sum",
"numpy.asarray",
"numpy.zeros",
"menpo.image.Image.init_blank",
"menpofit.base.build_sampling_grid",
"numpy.hstack",
"menpofit.fittingresult.ParametricFittingResult",
"menpo.visualize.progress_bar_str",
"menpofit.fittingresult.NonParametricFittingResult",
"menpofit.fittingresult.SemiP... | [((9627, 9669), 'menpo.image.Image.init_blank', 'Image.init_blank', (['self.patch_shape'], {'fill': '(0)'}), '(self.patch_shape, fill=0)\n', (9643, 9669), False, 'from menpo.image import Image\n'), ((10289, 10368), 'menpofit.fittingresult.NonParametricFittingResult', 'NonParametricFittingResult', (['image', 'self'], {'parameters': '[shapes]', 'gt_shape': 'gt_shape'}), '(image, self, parameters=[shapes], gt_shape=gt_shape)\n', (10315, 10368), False, 'from menpofit.fittingresult import NonParametricFittingResult, SemiParametricFittingResult, ParametricFittingResult\n'), ((10874, 10928), 'numpy.zeros', 'np.zeros', (['(shape.n_points, self._feature_patch_length)'], {}), '((shape.n_points, self._feature_patch_length))\n', (10882, 10928), True, 'import numpy as np\n'), ((14280, 14365), 'menpofit.fittingresult.SemiParametricFittingResult', 'SemiParametricFittingResult', (['image', 'self'], {'parameters': '[shapes]', 'gt_shape': 'gt_shape'}), '(image, self, parameters=[shapes], gt_shape=gt_shape\n )\n', (14307, 14365), False, 'from menpofit.fittingresult import NonParametricFittingResult, SemiParametricFittingResult, ParametricFittingResult\n'), ((18267, 18343), 'menpofit.fittingresult.ParametricFittingResult', 'ParametricFittingResult', (['image', 'self'], {'parameters': '[shapes]', 'gt_shape': 'gt_shape'}), '(image, self, parameters=[shapes], gt_shape=gt_shape)\n', (18290, 18343), False, 'from menpofit.fittingresult import NonParametricFittingResult, SemiParametricFittingResult, ParametricFittingResult\n'), ((19121, 19145), 'numpy.hstack', 'np.hstack', (['(features, 1)'], {}), '((features, 1))\n', (19130, 19145), True, 'import numpy as np\n'), ((21985, 22022), 'menpofit.base.build_sampling_grid', 'build_sampling_grid', (['self.patch_shape'], {}), '(self.patch_shape)\n', (22004, 22022), False, 'from menpofit.base import noisy_align, build_sampling_grid\n'), ((2781, 2826), 'menpo.visualize.print_dynamic', 'print_dynamic', (['"""- 
Generating regression data"""'], {}), "('- Generating regression data')\n", (2794, 2826), False, 'from menpo.visualize import print_dynamic, progress_bar_str\n'), ((3366, 3386), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (3376, 3386), True, 'import numpy as np\n'), ((3388, 3408), 'numpy.asarray', 'np.asarray', (['delta_ps'], {}), '(delta_ps)\n', (3398, 3408), True, 'import numpy as np\n'), ((6026, 6069), 'menpo.visualize.print_dynamic', 'print_dynamic', (['"""- Performing regression..."""'], {}), "('- Performing regression...')\n", (6039, 6069), False, 'from menpo.visualize import print_dynamic, progress_bar_str\n'), ((6292, 6344), 'numpy.sum', 'np.sum', (['((delta_ps - estimated_delta_ps) ** 2)'], {'axis': '(1)'}), '((delta_ps - estimated_delta_ps) ** 2, axis=1)\n', (6298, 6344), True, 'import numpy as np\n'), ((7436, 7505), 'menpofit.base.noisy_align', 'noisy_align', (['self.reference_shape', 'gt_shape'], {'noise_std': 'self.noise_std'}), '(self.reference_shape, gt_shape, noise_std=self.noise_std)\n', (7447, 7505), False, 'from menpofit.base import noisy_align, build_sampling_grid\n'), ((3156, 3210), 'menpo.visualize.progress_bar_str', 'progress_bar_str', (['((j + 1.0) / n_images)'], {'show_bar': '(False)'}), '((j + 1.0) / n_images, show_bar=False)\n', (3172, 3210), False, 'from menpo.visualize import print_dynamic, progress_bar_str\n'), ((22599, 22619), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (22609, 22619), True, 'import numpy as np\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="2,3"
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
from numpy.core.fromnumeric import argmax
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
from mmdet.core import encode_mask_results
import numpy as np
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from sklearn.metrics import classification_report
from pycocotools.cocoeval import COCOeval
from mmdet.core.bbox.assigners import MaxIoUAssigner
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    show_score_thr=0.3):
    """Run single-GPU inference and collect detection + orientation outputs.

    Parameters
    ----------
    model : nn.Module
        Wrapped detector; each forward call here returns a
        ``(result, orientation_result)`` pair per batch.
    data_loader : DataLoader
        Test-set loader.
    show : bool
        If True, display painted results on screen.
    out_dir : str or None
        If given, painted images are written below this directory.
    show_score_thr : float
        Score threshold used for visualization only.

    Returns
    -------
    (list, list)
        Per-image detection results and orientation predictions.
    """
    model.eval()
    results = []
    orientation_results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            # Forward pass; the model also returns the extra orientation
            # head output alongside the standard detections.
            result, orientation_result = model(return_loss=False, rescale=True, **data)
        batch_size = len(result)
        if show or out_dir:
            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
                img_tensor = data['img'][0]
            else:
                img_tensor = data['img'][0].data[0]
            img_metas = data['img_metas'][0].data[0]
            # De-normalize the batch tensor back into displayable images.
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)
            # NOTE(review): this inner loop shadows the outer loop variable
            # ``i``; harmless because the outer ``i`` is never read, but
            # worth renaming.
            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]
                ori_h, ori_w = img_meta['ori_shape'][:-1]
                # Resize back to the original image resolution.
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))
                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None
                model.module.show_result(
                    img_show,
                    result[i],
                    show=show,
                    out_file=out_file,
                    score_thr=show_score_thr)
        # encode mask results
        if isinstance(result[0], tuple):
            result = [(bbox_results, encode_mask_results(mask_results))
                      for bbox_results, mask_results in result]
        results.extend(result)
        orientation_results.extend(orientation_result)
        for _ in range(batch_size):
            prog_bar.update()
    return results, orientation_results
def parse_args():
    """Build and parse the command-line interface of the test script.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable when absent, and migrates the deprecated ``--options`` flag
    onto ``--eval-options``.
    """
    arg_parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    arg_parser.add_argument('config', help='test config file path')
    arg_parser.add_argument('checkpoint', help='checkpoint file')
    arg_parser.add_argument(
        '--work-dir',
        help='the directory to save the file containing evaluation metrics')
    arg_parser.add_argument(
        '--out', help='output result file in pickle format')
    arg_parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    arg_parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    arg_parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    arg_parser.add_argument('--show', action='store_true', help='show results')
    arg_parser.add_argument(
        '--show-dir', help='directory where painted images will be saved')
    arg_parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0.3,
        help='score threshold (default: 0.3)')
    arg_parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    arg_parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    arg_parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    arg_parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    arg_parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    arg_parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    arg_parser.add_argument('--local_rank', type=int, default=0)
    parsed = arg_parser.parse_args()
    # Propagate the launcher rank to the environment if not already set.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    if parsed.options:
        if parsed.eval_options:
            raise ValueError(
                '--options and --eval-options cannot be both '
                'specified, --options is deprecated in favor of --eval-options')
        warnings.warn('--options is deprecated in favor of --eval-options')
        parsed.eval_options = parsed.options
    return parsed
def main():
    """Run detector inference on the test set, then evaluate both the
    standard detection metrics and the custom orientation predictions."""
    args = parse_args()
    # assert args.out or args.eval or args.format_only or args.show \
    #     or args.show_dir, \
    #     ('Please specify at least one operation (save/eval/format/show the '
    #      'results / save the results) with the argument "--out", "--eval"'
    #      ', "--format-only", "--show" or "--show-dir"')
    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    # Drop any pretrained weights configured on RFP backbones: inference
    # loads everything from the checkpoint instead.
    if cfg.model.get('neck'):
        if isinstance(cfg.model.neck, list):
            for neck_cfg in cfg.model.neck:
                if neck_cfg.get('rfp_backbone'):
                    if neck_cfg.rfp_backbone.get('pretrained'):
                        neck_cfg.rfp_backbone.pretrained = None
        elif cfg.model.neck.get('rfp_backbone'):
            if cfg.model.neck.rfp_backbone.get('pretrained'):
                cfg.model.neck.rfp_backbone.pretrained = None
    # in case the test dataset is concatenated
    samples_per_gpu = 1
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
        if samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        samples_per_gpu = max(
            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
        if samples_per_gpu > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
    print(f"\n#### samples_per_gpu: {samples_per_gpu}\n")
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    rank, _ = get_dist_info()
    # allows not to create
    if args.work_dir is not None and rank == 0:
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this walkaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs, orientation_results = single_gpu_test(model, data_loader, args.show, args.show_dir,
                                  args.show_score_thr)
    else:
        # NOTE(review): this branch only assigns ``outputs``; the
        # orientation evaluation below reads ``orientation_results``,
        # which would raise NameError in distributed mode — confirm the
        # script is only run single-GPU.
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)
    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            metric = dataset.evaluate(outputs, **eval_kwargs)
            print(metric)
            metric_dict = dict(config=args.config, metric=metric)
            if args.work_dir is not None and rank == 0:
                mmcv.dump(metric_dict, json_file)
    #####################################################################
    # orientation evaluation
    #####################################################################
    accepted_proposals = 0  # score above bbox_threshold
    discarded_proposals = 0  # score below bbox_threshold
    strong_proposals = 0  # if overlap with gt > gt_overlap_threshold
    weak_proposals = 0  # if overlap with gt < gt_overlap_threshold
    bbox_threshold = 0.5
    gt_overlap_threshold = 0.5
    bbox_results_with_gt = []
    cocoGt = dataset.coco
    imgIds = cocoGt.getImgIds()
    print(f"\nlen img ids: {len(imgIds)}")
    print(f"len orientation results: {len(orientation_results)}")
    pred_ori = []  # predicted orientation bin per accepted proposal
    gt_ori = []  # matched ground-truth orientation bin per accepted proposal
    mint_pred_ori = []  # one best prediction kept per GT box
    mint_gt_ori = []
    for idx in range(len(dataset)):
        img_id = dataset.data_infos[idx]['id']
        gt_ann_ids = cocoGt.get_ann_ids(img_ids=[img_id])
        gt_ann_info = cocoGt.load_anns(gt_ann_ids)
        gt_ann = dataset._parse_ann_info(dataset.data_infos[idx], gt_ann_info)
        # img_id = dataset.img_ids[idx]
        # gt_ann_ids = cocoGt.getAnnIds(imgIds=[img_id])
        # gt_ann_info = cocoGt.loadAnns(gt_ann_ids)
        # gt_ann = dataset._parse_ann_info(gt_ann_info)
        gt_bboxes = np.array([gt_bbox for gt_bbox in gt_ann['bboxes']])
        # check_overlaps_with_gt = np.zeros(len(gt_bboxes))
        # For each GT box, remember the best-overlapping prediction index
        # (-1 when none) and its overlap value.
        selected_pred = np.full(len(gt_bboxes), -1)
        selected_pred_max = np.zeros(len(gt_bboxes))
        if len(outputs[idx]) == 1:
            det = outputs[idx]
        else:
            det, _ = outputs[idx]
        ori = orientation_results[idx]
        # only one label (person)
        bboxes = det[0]
        # print(f"\nimage {img_id} with {len([bb for bb in bboxes if bb[4]>=bbox_threshold])} valid proposals")
        for i in range(bboxes.shape[0]):
            bbox_score = float(bboxes[i][4])
            if bbox_score >= bbox_threshold:
                accepted_proposals += 1
                pred_bbox = np.array([bboxes[i][:4]])
                overlaps = bbox_overlaps(pred_bbox, gt_bboxes)
                # Match each accepted proposal to its best-overlapping GT.
                max_idx = np.argmax(overlaps)
                if np.max(overlaps) < gt_overlap_threshold:
                    weak_proposals += 1
                else:
                    strong_proposals += 1
                    # check_overlaps_with_gt[max_idx] = 1
                if np.max(overlaps) > selected_pred_max[max_idx]:
                    selected_pred[max_idx] = i
                    selected_pred_max[max_idx] = np.max(overlaps)
                gt_ori.append(int(np.argmax(gt_ann['orientations'][max_idx])))
                pred_ori.append(int(np.argmax(ori[i])))
                # for saving the detected bboxes as a new dataset for mebow testing
                res_dict = dict()
                res_dict['image_id'] = img_id
                res_dict['bbox'] = xyxy2xywh(bboxes[i])
                res_dict['score'] = bbox_score
                res_dict['category_id'] = dataset.cat_ids[0]  # only person label
                res_dict['orientation'] = int(np.argmax(ori[i]))
                res_dict['orientation_score'] = float(np.max(ori[i]))
                res_dict['gt_annotation_id'] = gt_ann_ids[max_idx]
                gt_ann_original = cocoGt.loadAnns(gt_ann_ids[max_idx])
                res_dict['gt_orientation'] = gt_ann_original[0]["orientation"]
                bbox_results_with_gt.append(res_dict)
            else:
                discarded_proposals += 1
        # if not check_overlaps_with_gt.all():
        #     print(f"GT bboxes not covered for img {img_id}")
        #     exit()
        for ix, sp in enumerate(selected_pred):
            if sp >= 0:
                mint_pred_ori.append(int(np.argmax(ori[sp])))
                mint_gt_ori.append(int(np.argmax(gt_ann["orientations"][ix])))
        # if not (selected_pred >= 0).all():
        #     print(f"!!!GT bboxes not covered for img {img_id}!!!")
        #     exit()
        # if len(set(selected_pred)) != len(gt_bboxes):
        #     print(f"Duplicated proposals")
        #     exit()
    if pred_ori:
        print("\nComplete classification report\n")
        print(classification_report(gt_ori, pred_ori))
        accc = calc_acc(gt_ori, pred_ori)
        accc_metrics = ["result", "excellent_5", "mid_15", "poor_225", "poor_30", "poor_45"]
        for accc_metric, accc_result in zip(accc_metrics, accc):
            print(f"{accc_metric}: {round(accc_result, 2)}")
        gt_ori_4class = to_4_classes(gt_ori)
        pred_ori_4class = to_4_classes(pred_ori)
        print("\n4 class classification report\n")
        print(classification_report(gt_ori_4class, pred_ori_4class))
    else:
        print("No positive bboxes")
    if mint_pred_ori:
        print("\nMint classification report\n")
        print(classification_report(mint_gt_ori, mint_pred_ori))
    print(f"accepted proposals: {accepted_proposals}")
    print(f"rejected proposals: {discarded_proposals}")
    print(f"weak proposals: {weak_proposals}")
    print(f"strong proposals: {strong_proposals}")
    # print(f"len(bbox_results_with_gt): {len(bbox_results_with_gt)}")
    # mmcv.dump(bbox_results_with_gt, osp.join(cfg.work_dir, f'bbox_results_with_gt_{int(bbox_threshold*10)}.json'))
def xyxy2xywh(bbox):
    """Convert an ``[x1, y1, x2, y2, ...]`` box (extra trailing entries such
    as a score are ignored) to ``[x, y, w, h]`` using the legacy +1
    width/height convention."""
    coords = bbox.tolist()
    x_min, y_min = coords[0], coords[1]
    width = coords[2] - x_min + 1
    height = coords[3] - y_min + 1
    return [x_min, y_min, width, height]
def calc_acc(gt_ori, pred_ori):
    """Compute the mean angular error and accuracy-at-threshold rates.

    Each label is a 5-degree orientation bin index; the angular difference
    is wrapped so it never exceeds 180 degrees.

    Parameters
    ----------
    gt_ori : sequence of int
        Ground-truth orientation bin per sample.
    pred_ori : sequence of int
        Predicted orientation bin per sample (same length).

    Returns
    -------
    list of float
        ``[mean_error_deg, acc@5, acc@15, acc@22.5, acc@30, acc@45]``.
        All zeros when the input is empty (the original implementation
        raised ``ZeroDivisionError`` in that case).
    """
    tot = len(pred_ori)
    if tot == 0:
        # Guard against empty input instead of dividing by zero.
        return [0.0] * 6
    thresholds = (5, 15, 22.5, 30, 45)
    within = [0] * len(thresholds)
    error_sum = 0
    for pred, gt in zip(pred_ori, gt_ori):
        diff = abs(pred - gt) * 5
        # Wrap around the circle: 355 degrees apart is really 5 degrees.
        diff = min(diff, 360 - diff)
        error_sum += diff
        for k, threshold in enumerate(thresholds):
            if diff <= threshold:
                within[k] += 1
    return [error_sum / tot] + [count / tot for count in within]
def to_4_classes(degree_array):
    """Collapse 5-degree orientation bins into 4 coarse classes.

    With ``deg = int(el * 5)`` the mapping is::

        deg <= 45 or deg > 315  -> 1
        45 < deg <= 135         -> 2
        135 < deg <= 225        -> 3
        225 < deg <= 315        -> 4

    Parameters
    ----------
    degree_array : sequence of number
        Orientation bin indices (degrees = bin * 5).

    Returns
    -------
    numpy.ndarray
        Class labels, one per input element.

    Notes
    -----
    The original fourth branch tested ``int(el)*5`` instead of
    ``int(el*5)``; for non-integer inputs (e.g. 45.3) no branch matched
    and the element was silently dropped, shortening the output. For
    integer inputs (the only values produced by the callers in this
    script) behavior is unchanged.
    """
    new_deg_array = []
    for el in degree_array:
        deg = int(el * 5)
        if deg <= 45 or deg > 315:
            new_deg_array.append(1)
        elif deg <= 135:
            new_deg_array.append(2)
        elif deg <= 225:
            new_deg_array.append(3)
        else:
            # Remaining range is exactly 225 < deg <= 315.
            new_deg_array.append(4)
    return np.array(new_deg_array)
# Script entry point: parse CLI arguments and run inference + evaluation.
if __name__ == '__main__':
    main()
| [
"mmcv.runner.get_dist_info",
"mmdet.datasets.replace_ImageToTensor",
"argparse.ArgumentParser",
"numpy.argmax",
"mmcv.image.tensor2imgs",
"sklearn.metrics.classification_report",
"mmcv.utils.import_modules_from_strings",
"mmcv.Config.fromfile",
"mmcv.dump",
"torch.cuda.current_device",
"torch.no... | [((3098, 3166), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MMDet test (and eval) a model"""'}), "(description='MMDet test (and eval) a model')\n", (3121, 3166), False, 'import argparse\n'), ((7188, 7216), 'mmcv.Config.fromfile', 'Config.fromfile', (['args.config'], {}), '(args.config)\n', (7203, 7216), False, 'from mmcv import Config, DictAction\n'), ((9192, 9207), 'mmcv.runner.get_dist_info', 'get_dist_info', ([], {}), '()\n', (9205, 9207), False, 'from mmcv.runner import get_dist_info\n'), ((9520, 9548), 'mmdet.datasets.build_dataset', 'build_dataset', (['cfg.data.test'], {}), '(cfg.data.test)\n', (9533, 9548), False, 'from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor\n'), ((9567, 9705), 'mmdet.datasets.build_dataloader', 'build_dataloader', (['dataset'], {'samples_per_gpu': 'samples_per_gpu', 'workers_per_gpu': 'cfg.data.workers_per_gpu', 'dist': 'distributed', 'shuffle': '(False)'}), '(dataset, samples_per_gpu=samples_per_gpu, workers_per_gpu=\n cfg.data.workers_per_gpu, dist=distributed, shuffle=False)\n', (9583, 9705), False, 'from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor\n'), ((9998, 10057), 'mmcv.runner.load_checkpoint', 'load_checkpoint', (['model', 'args.checkpoint'], {'map_location': '"""cpu"""'}), "(model, args.checkpoint, map_location='cpu')\n", (10013, 10057), False, 'from mmcv.runner import get_dist_info, init_dist, load_checkpoint, wrap_fp16_model\n'), ((10920, 10935), 'mmcv.runner.get_dist_info', 'get_dist_info', ([], {}), '()\n', (10933, 10935), False, 'from mmcv.runner import get_dist_info\n'), ((18917, 18940), 'numpy.array', 'np.array', (['new_deg_array'], {}), '(new_deg_array)\n', (18925, 18940), True, 'import numpy as np\n'), ((6440, 6507), 'warnings.warn', 'warnings.warn', (['"""--options is deprecated in favor of --eval-options"""'], {}), "('--options is deprecated in favor of --eval-options')\n", (6453, 6507), False, 
'import warnings\n'), ((7446, 7498), 'mmcv.utils.import_modules_from_strings', 'import_modules_from_strings', ([], {}), "(**cfg['custom_imports'])\n", (7473, 7498), False, 'from mmcv.utils import import_modules_from_strings\n'), ((9133, 9176), 'mmcv.runner.init_dist', 'init_dist', (['args.launcher'], {}), '(args.launcher, **cfg.dist_params)\n', (9142, 9176), False, 'from mmcv.runner import get_dist_info, init_dist, load_checkpoint, wrap_fp16_model\n'), ((9428, 9477), 'os.path.join', 'osp.join', (['args.work_dir', 'f"""eval_{timestamp}.json"""'], {}), "(args.work_dir, f'eval_{timestamp}.json')\n", (9436, 9477), True, 'import os.path as osp\n'), ((9958, 9980), 'mmcv.runner.wrap_fp16_model', 'wrap_fp16_model', (['model'], {}), '(model)\n', (9973, 9980), False, 'from mmcv.runner import get_dist_info, init_dist, load_checkpoint, wrap_fp16_model\n'), ((10100, 10119), 'mmcv.cnn.fuse_conv_bn', 'fuse_conv_bn', (['model'], {}), '(model)\n', (10112, 10119), False, 'from mmcv.cnn import fuse_conv_bn\n'), ((10424, 10461), 'mmcv.parallel.MMDataParallel', 'MMDataParallel', (['model'], {'device_ids': '[0]'}), '(model, device_ids=[0])\n', (10438, 10461), False, 'from mmcv.parallel import MMDataParallel, MMDistributedDataParallel\n'), ((10806, 10871), 'mmdet.apis.multi_gpu_test', 'multi_gpu_test', (['model', 'data_loader', 'args.tmpdir', 'args.gpu_collect'], {}), '(model, data_loader, args.tmpdir, args.gpu_collect)\n', (10820, 10871), False, 'from mmdet.apis import multi_gpu_test\n'), ((1461, 1476), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1474, 1476), False, 'import torch\n'), ((1891, 1946), 'mmcv.image.tensor2imgs', 'tensor2imgs', (['img_tensor'], {}), "(img_tensor, **img_metas[0]['img_norm_cfg'])\n", (1902, 1946), False, 'from mmcv.image import tensor2imgs\n'), ((8464, 8509), 'mmdet.datasets.replace_ImageToTensor', 'replace_ImageToTensor', (['cfg.data.test.pipeline'], {}), '(cfg.data.test.pipeline)\n', (8485, 8509), False, 'from mmdet.datasets import build_dataloader, 
build_dataset, replace_ImageToTensor\n'), ((9311, 9337), 'os.path.abspath', 'osp.abspath', (['args.work_dir'], {}), '(args.work_dir)\n', (9322, 9337), True, 'import os.path as osp\n'), ((9390, 9406), 'time.localtime', 'time.localtime', ([], {}), '()\n', (9404, 9406), False, 'import time\n'), ((11041, 11069), 'mmcv.dump', 'mmcv.dump', (['outputs', 'args.out'], {}), '(outputs, args.out)\n', (11050, 11069), False, 'import mmcv\n'), ((13311, 13362), 'numpy.array', 'np.array', (["[gt_bbox for gt_bbox in gt_ann['bboxes']]"], {}), "([gt_bbox for gt_bbox in gt_ann['bboxes']])\n", (13319, 13362), True, 'import numpy as np\n'), ((2242, 2281), 'mmcv.imresize', 'mmcv.imresize', (['img_show', '(ori_w, ori_h)'], {}), '(img_show, (ori_w, ori_h))\n', (2255, 2281), False, 'import mmcv\n'), ((11838, 11871), 'mmcv.dump', 'mmcv.dump', (['metric_dict', 'json_file'], {}), '(metric_dict, json_file)\n', (11847, 11871), False, 'import mmcv\n'), ((16495, 16534), 'sklearn.metrics.classification_report', 'classification_report', (['gt_ori', 'pred_ori'], {}), '(gt_ori, pred_ori)\n', (16516, 16534), False, 'from sklearn.metrics import classification_report\n'), ((16989, 17042), 'sklearn.metrics.classification_report', 'classification_report', (['gt_ori_4class', 'pred_ori_4class'], {}), '(gt_ori_4class, pred_ori_4class)\n', (17010, 17042), False, 'from sklearn.metrics import classification_report\n'), ((17195, 17244), 'sklearn.metrics.classification_report', 'classification_report', (['mint_gt_ori', 'mint_pred_ori'], {}), '(mint_gt_ori, mint_pred_ori)\n', (17216, 17244), False, 'from sklearn.metrics import classification_report\n'), ((2342, 2385), 'os.path.join', 'osp.join', (['out_dir', "img_meta['ori_filename']"], {}), "(out_dir, img_meta['ori_filename'])\n", (2350, 2385), True, 'import os.path as osp\n'), ((2773, 2806), 'mmdet.core.encode_mask_results', 'encode_mask_results', (['mask_results'], {}), '(mask_results)\n', (2792, 2806), False, 'from mmdet.core import encode_mask_results\n'), 
((8856, 8894), 'mmdet.datasets.replace_ImageToTensor', 'replace_ImageToTensor', (['ds_cfg.pipeline'], {}), '(ds_cfg.pipeline)\n', (8877, 8894), False, 'from mmdet.datasets import build_dataloader, build_dataset, replace_ImageToTensor\n'), ((10721, 10748), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (10746, 10748), False, 'import torch\n'), ((14117, 14142), 'numpy.array', 'np.array', (['[bboxes[i][:4]]'], {}), '([bboxes[i][:4]])\n', (14125, 14142), True, 'import numpy as np\n'), ((14175, 14210), 'mmdet.core.evaluation.bbox_overlaps.bbox_overlaps', 'bbox_overlaps', (['pred_bbox', 'gt_bboxes'], {}), '(pred_bbox, gt_bboxes)\n', (14188, 14210), False, 'from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps\n'), ((14241, 14260), 'numpy.argmax', 'np.argmax', (['overlaps'], {}), '(overlaps)\n', (14250, 14260), True, 'import numpy as np\n'), ((14285, 14301), 'numpy.max', 'np.max', (['overlaps'], {}), '(overlaps)\n', (14291, 14301), True, 'import numpy as np\n'), ((14524, 14540), 'numpy.max', 'np.max', (['overlaps'], {}), '(overlaps)\n', (14530, 14540), True, 'import numpy as np\n'), ((14675, 14691), 'numpy.max', 'np.max', (['overlaps'], {}), '(overlaps)\n', (14681, 14691), True, 'import numpy as np\n'), ((15283, 15300), 'numpy.argmax', 'np.argmax', (['ori[i]'], {}), '(ori[i])\n', (15292, 15300), True, 'import numpy as np\n'), ((15360, 15374), 'numpy.max', 'np.max', (['ori[i]'], {}), '(ori[i])\n', (15366, 15374), True, 'import numpy as np\n'), ((14751, 14793), 'numpy.argmax', 'np.argmax', (["gt_ann['orientations'][max_idx]"], {}), "(gt_ann['orientations'][max_idx])\n", (14760, 14793), True, 'import numpy as np\n'), ((14836, 14853), 'numpy.argmax', 'np.argmax', (['ori[i]'], {}), '(ori[i])\n', (14845, 14853), True, 'import numpy as np\n'), ((16001, 16019), 'numpy.argmax', 'np.argmax', (['ori[sp]'], {}), '(ori[sp])\n', (16010, 16019), True, 'import numpy as np\n'), ((16065, 16102), 'numpy.argmax', 'np.argmax', 
(["gt_ann['orientations'][ix]"], {}), "(gt_ann['orientations'][ix])\n", (16074, 16102), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Helpers for various transformations."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD-3-Clause
import os
import os.path as op
import glob
import numpy as np
from copy import deepcopy
from .fixes import jit, mean, _get_img_fdata
from .io.constants import FIFF
from .io.open import fiff_open
from .io.tag import read_tag
from .io.write import start_file, end_file, write_coord_trans
from .defaults import _handle_default
from .utils import (check_fname, logger, verbose, _ensure_int, _validate_type,
_path_like, get_subjects_dir, fill_doc, _check_fname,
_check_option, _require_version, wrapped_stdout)
# transformation from anterior/left/superior coordinate system to
# right/anterior/superior:
als_ras_trans = np.array([[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0],
                          [0, 0, 0, 1]])
# short human-readable frame name -> FIFF integer constant
_str_to_frame = dict(meg=FIFF.FIFFV_COORD_DEVICE,
                     mri=FIFF.FIFFV_COORD_MRI,
                     mri_voxel=FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
                     head=FIFF.FIFFV_COORD_HEAD,
                     mni_tal=FIFF.FIFFV_MNE_COORD_MNI_TAL,
                     ras=FIFF.FIFFV_MNE_COORD_RAS,
                     fs_tal=FIFF.FIFFV_MNE_COORD_FS_TAL,
                     ctf_head=FIFF.FIFFV_MNE_COORD_CTF_HEAD,
                     ctf_meg=FIFF.FIFFV_MNE_COORD_CTF_DEVICE,
                     unknown=FIFF.FIFFV_COORD_UNKNOWN)
# inverse mapping: FIFF integer constant -> short name
_frame_to_str = {val: key for key, val in _str_to_frame.items()}
# FIFF integer constant -> long, display-oriented name; covers more frames
# than _str_to_frame (e.g. isotrak, hpi, MRI slice/display), plus a -1
# fallback for "unknown"
_verbose_frames = {FIFF.FIFFV_COORD_UNKNOWN: 'unknown',
                   FIFF.FIFFV_COORD_DEVICE: 'MEG device',
                   FIFF.FIFFV_COORD_ISOTRAK: 'isotrak',
                   FIFF.FIFFV_COORD_HPI: 'hpi',
                   FIFF.FIFFV_COORD_HEAD: 'head',
                   FIFF.FIFFV_COORD_MRI: 'MRI (surface RAS)',
                   FIFF.FIFFV_MNE_COORD_MRI_VOXEL: 'MRI voxel',
                   FIFF.FIFFV_COORD_MRI_SLICE: 'MRI slice',
                   FIFF.FIFFV_COORD_MRI_DISPLAY: 'MRI display',
                   FIFF.FIFFV_MNE_COORD_CTF_DEVICE: 'CTF MEG device',
                   FIFF.FIFFV_MNE_COORD_CTF_HEAD: 'CTF/4D/KIT head',
                   FIFF.FIFFV_MNE_COORD_RAS: 'RAS (non-zero origin)',
                   FIFF.FIFFV_MNE_COORD_MNI_TAL: 'MNI Talairach',
                   FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ: 'Talairach (MNI z > 0)',
                   FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ: 'Talairach (MNI z < 0)',
                   -1: 'unknown'}
def _to_const(cf):
    """Map a coordinate-frame specifier (str or int) to its int constant."""
    if isinstance(cf, str):
        # string names must be one of the known aliases
        if cf in _str_to_frame:
            return int(_str_to_frame[cf])
        opts = '", "'.join(_str_to_frame.keys())
        raise ValueError(
            f'Unknown coordinate frame {cf}, expected "{opts}"')
    # anything else must be integer-like
    return int(_ensure_int(cf, 'coordinate frame', 'a str or int'))
class Transform(dict):
    """A transform.

    Subclasses ``dict`` so instances are FIF-style mappings with keys
    ``'from'`` and ``'to'`` (int coordinate-frame constants) and
    ``'trans'`` (the (4, 4) float64 matrix).

    Parameters
    ----------
    fro : str | int
        The starting coordinate frame. See notes for valid coordinate frames.
    to : str | int
        The ending coordinate frame. See notes for valid coordinate frames.
    trans : array-like, shape (4, 4) | None
        The transformation matrix. If None, an identity matrix will be
        used.

    Notes
    -----
    Valid coordinate frames are 'meg', 'mri', 'mri_voxel', 'head',
    'mni_tal', 'ras', 'fs_tal', 'ctf_head', 'ctf_meg', 'unknown'.
    """

    def __init__(self, fro, to, trans=None):  # noqa: D102
        super(Transform, self).__init__()
        # we could add some better sanity checks here
        fro = _to_const(fro)
        to = _to_const(to)
        # default to the identity when no matrix is given
        trans = np.eye(4) if trans is None else np.asarray(trans, np.float64)
        if trans.shape != (4, 4):
            raise ValueError(
                f'Transformation must be shape (4, 4) not {trans.shape}')
        self['from'] = fro
        self['to'] = to
        self['trans'] = trans

    def __repr__(self):  # noqa: D105
        with np.printoptions(suppress=True):  # suppress scientific notation
            return '<Transform | {fro}->{to}>\n{trans}'.format(
                fro=_coord_frame_name(self['from']),
                to=_coord_frame_name(self['to']), trans=self['trans'])

    def __eq__(self, other, rtol=0., atol=0.):
        """Check for equality.

        Parameters
        ----------
        other : instance of Transform
            The other transform.
        rtol : float
            Relative tolerance.
        atol : float
            Absolute tolerance.

        Returns
        -------
        eq : bool
            True if the transforms are equal.
        """
        # frames must match exactly; matrices compared within tolerance
        return (isinstance(other, Transform) and
                self['from'] == other['from'] and
                self['to'] == other['to'] and
                np.allclose(self['trans'], other['trans'], rtol=rtol,
                            atol=atol))

    def __ne__(self, other, rtol=0., atol=0.):
        """Check for inequality.

        Parameters
        ----------
        other : instance of Transform
            The other transform.
        rtol : float
            Relative tolerance.
        atol : float
            Absolute tolerance.

        Returns
        -------
        eq : bool
            True if the transforms are not equal.
        """
        return not self == other

    @property
    def from_str(self):
        """The "from" frame as a string."""
        return _coord_frame_name(self['from'])

    @property
    def to_str(self):
        """The "to" frame as a string."""
        return _coord_frame_name(self['to'])

    def save(self, fname):
        """Save the transform as -trans.fif file.

        Parameters
        ----------
        fname : str
            The name of the file, which should end in '-trans.fif'.
        """
        write_trans(fname, self)

    def copy(self):
        """Make a copy of the transform."""
        return deepcopy(self)
def _coord_frame_name(cframe):
    """Return the verbose human-readable name for a frame constant."""
    key = int(cframe)
    return _verbose_frames[key] if key in _verbose_frames else 'unknown'
def _print_coord_trans(t, prefix='Coordinate transformation: ', units='m',
                       level='info'):
    """Log a transform in human-readable form.

    Parameters
    ----------
    t : dict
        Transform with 'from', 'to' and 'trans' entries.
    prefix : str
        Text prepended to the header line.
    units : str
        Units of the rotation rows of ``t`` ('m' or 'mm').
    level : str
        Logger method name to use ('info', 'debug', ...).
    """
    # Units gives the units of the transformation. This always prints in mm.
    log_func = getattr(logger, level)
    log_func(prefix + '{fro} -> {to}'.format(
        fro=_coord_frame_name(t['from']), to=_coord_frame_name(t['to'])))
    for ti, tt in enumerate(t['trans']):
        # rows 0-2 carry the translation in the last column; scale m -> mm.
        # Row 3 is the homogeneous [0 0 0 1] row and gets no unit suffix.
        scale = 1000. if (ti != 3 and units != 'mm') else 1.
        text = ' mm' if ti != 3 else ''
        log_func('    % 8.6f % 8.6f % 8.6f    %7.2f%s' %
                 (tt[0], tt[1], tt[2], scale * tt[3], text))
def _find_trans(subject, subjects_dir=None):
if subject is None:
if 'SUBJECT' in os.environ:
subject = os.environ['SUBJECT']
else:
raise ValueError('SUBJECT environment variable not set')
trans_fnames = glob.glob(op.join(subjects_dir, subject, '*-trans.fif'))
if len(trans_fnames) < 1:
raise RuntimeError('Could not find the transformation for '
'{subject}'.format(subject=subject))
elif len(trans_fnames) > 1:
raise RuntimeError('Found multiple transformations for '
'{subject}'.format(subject=subject))
return trans_fnames[0]
def apply_trans(trans, pts, move=True):
    """Apply a transform matrix to an array of points.

    Parameters
    ----------
    trans : array, shape = (4, 4) | instance of Transform
        Transform matrix.
    pts : array, shape = (3,) | (n, 3)
        Array with coordinates for one or n points.
    move : bool
        If True (default), apply translation.

    Returns
    -------
    transformed_pts : shape = (3,) | (n, 3)
        Transformed point(s).
    """
    # accept either a Transform/dict or a bare 4x4 matrix
    matrix = trans['trans'] if isinstance(trans, dict) else trans
    pts = np.asarray(pts)
    if pts.size == 0:  # nothing to transform
        return pts.copy()
    # rotation/scaling part first ...
    out = np.dot(pts, matrix[:3, :3].T)
    # ... then the translation, unless suppressed
    if move:
        out = out + matrix[:3, 3]
    return out
def rotation(x=0, y=0, z=0):
    """Create an array with a 4 dimensional rotation matrix.

    Parameters
    ----------
    x, y, z : scalar
        Rotation around the origin (in rad).

    Returns
    -------
    r : array, shape = (4, 4)
        The rotation matrix (Rz @ Ry @ Rx in homogeneous form).
    """
    cx, sx = np.cos(x), np.sin(x)
    cy, sy = np.cos(y), np.sin(y)
    cz, sz = np.cos(z), np.sin(z)
    return np.array(
        [[cy * cz, -cx * sz + sx * sy * cz, sx * sz + cx * sy * cz, 0],
         [cy * sz, cx * cz + sx * sy * sz, -sx * cz + cx * sy * sz, 0],
         [-sy, sx * cy, cx * cy, 0],
         [0, 0, 0, 1]], dtype=float)
def rotation3d(x=0, y=0, z=0):
    """Create an array with a 3 dimensional rotation matrix.

    Parameters
    ----------
    x, y, z : scalar
        Rotation around the origin (in rad).

    Returns
    -------
    r : array, shape = (3, 3)
        The rotation matrix.
    """
    cx, sx = np.cos(x), np.sin(x)
    cy, sy = np.cos(y), np.sin(y)
    cz, sz = np.cos(z), np.sin(z)
    return np.array(
        [[cy * cz, -cx * sz + sx * sy * cz, sx * sz + cx * sy * cz],
         [cy * sz, cx * cz + sx * sy * sz, -sx * cz + cx * sy * sz],
         [-sy, sx * cy, cx * cy]], dtype=float)
def rotation3d_align_z_axis(target_z_axis):
    """Compute a rotation matrix mapping [0, 0, 1] onto ``target_z_axis``.

    Parameters
    ----------
    target_z_axis : array, shape (3,)
        Target direction; normalized internally, so it need not be a unit
        vector.

    Returns
    -------
    r : array, shape (3, 3)
        The rotation matrix; ``r @ [0, 0, 1]`` equals the normalized
        ``target_z_axis``.
    """
    t = target_z_axis / np.linalg.norm(target_z_axis)
    r = np.zeros((3, 3))
    if ((1. + t[2]) < 1E-12):
        # degenerate case: target is (numerically) antiparallel to +z,
        # so rotate pi around the x axis
        r[0, 0] = 1.
        r[1, 1] = -1.
        r[2, 2] = -1.
    else:
        # closed-form rotation taking +z onto t
        f = 1. / (1. + t[2])
        r[0, 0] = 1. - f * t[0] * t[0]
        r[0, 1] = -f * t[0] * t[1]
        r[0, 2] = t[0]
        r[1, 0] = -f * t[0] * t[1]
        r[1, 1] = 1. - f * t[1] * t[1]
        r[1, 2] = t[1]
        r[2, 0] = -t[0]
        r[2, 1] = -t[1]
        r[2, 2] = 1. - f * (t[0] * t[0] + t[1] * t[1])
    # Sanity checks. The original used np.any(X < 1e-12), which is vacuous
    # (it passes for nearly any matrix) and a det check that also passed for
    # det == -1; use meaningful orthonormality/determinant assertions.
    assert np.allclose(np.dot(r, r.T), np.identity(3), atol=1e-7)
    assert abs(np.linalg.det(r) - 1.) < 1e-7
    # r must map [0, 0, 1] onto the (normalized) target z axis
    assert np.linalg.norm(t - np.dot(r, [0, 0, 1])) < 1e-12
    return r
def rotation_angles(m):
    """Find rotation angles from a transformation matrix.

    Parameters
    ----------
    m : array, shape >= (3, 3)
        Rotation matrix. Only the top left 3 x 3 partition is accessed.

    Returns
    -------
    x, y, z : float
        Rotation around x, y and z axes.
    """
    x = np.arctan2(m[2, 1], m[2, 2])
    cos_y = np.sqrt(m[0, 0] ** 2 + m[1, 0] ** 2)
    y = np.arctan2(-m[2, 0], cos_y)
    sin_x, cos_x = np.sin(x), np.cos(x)
    z = np.arctan2(sin_x * m[0, 2] - cos_x * m[0, 1],
                   cos_x * m[1, 1] - sin_x * m[1, 2])
    return x, y, z
def scaling(x=1, y=1, z=1):
    """Create an array with a scaling matrix.

    Parameters
    ----------
    x, y, z : scalar
        Scaling factors.

    Returns
    -------
    s : array, shape = (4, 4)
        The scaling matrix.
    """
    # homogeneous scaling is just a diagonal matrix with a trailing 1
    return np.diag(np.array([x, y, z, 1.], dtype=float))
def translation(x=0, y=0, z=0):
    """Create an array with a translation matrix.

    Parameters
    ----------
    x, y, z : scalar
        Translation parameters.

    Returns
    -------
    m : array, shape = (4, 4)
        The translation matrix.
    """
    # identity with the translation placed in the last column
    m = np.eye(4)
    m[:3, 3] = (x, y, z)
    return m
def _ensure_trans(trans, fro='mri', to='head'):
    """Ensure we have the proper transform.

    Select, from ``trans`` (a Transform or list of Transforms), the unique
    transform connecting ``fro`` and ``to`` (in either direction), inverting
    it if necessary so that the result maps ``fro`` -> ``to``.

    Parameters
    ----------
    trans : Transform | list of Transform
        Candidate transform(s).
    fro : str | int
        Source frame, as short name or FIFF constant.
    to : str | int
        Destination frame, as short name or FIFF constant.

    Returns
    -------
    trans : Transform
        The ``fro`` -> ``to`` transform.
    """
    # normalize both frame specs to (name, constant) pairs for messages
    # and comparisons
    if isinstance(fro, str):
        from_str = fro
        from_const = _str_to_frame[fro]
    else:
        from_str = _frame_to_str[fro]
        from_const = fro
    del fro
    if isinstance(to, str):
        to_str = to
        to_const = _str_to_frame[to]
    else:
        to_str = _frame_to_str[to]
        to_const = to
    del to
    err_str = 'trans must be a Transform between ' \
              f'{from_str}<->{to_str}, got'
    if not isinstance(trans, (list, tuple)):
        trans = [trans]
    # Ensure that we have exactly one match
    idx = list()
    misses = list()
    for ti, this_trans in enumerate(trans):
        if not isinstance(this_trans, Transform):
            raise ValueError(f'{err_str} None')
        # set comparison: frames match irrespective of direction
        if {this_trans['from'],
                this_trans['to']} == {from_const, to_const}:
            idx.append(ti)
        else:
            misses += ['{fro}->{to}'.format(
                fro=_frame_to_str[this_trans['from']],
                to=_frame_to_str[this_trans['to']])]
    if len(idx) != 1:
        raise ValueError(f'{err_str} ' + ', '.join(misses))
    trans = trans[idx[0]]
    # flip direction if the match was to -> fro
    if trans['from'] != from_const:
        trans = invert_transform(trans)
    return trans
def _get_trans(trans, fro='mri', to='head', allow_none=True):
    """Get mri_head_t (from=mri, to=head) from mri filename.

    Parameters
    ----------
    trans : Transform | path-like | None
        A Transform instance, a path to a ``-trans.fif``/``.txt`` file,
        the special string 'fsaverage', or None for identity (only when
        ``allow_none=True``).
    fro, to : str | int
        Desired source/destination frames of the returned transform.
    allow_none : bool
        Whether None is an acceptable value for ``trans``.

    Returns
    -------
    fro_to_t : Transform
        The ``fro`` -> ``to`` transform (inverted from file if needed).
    trans : str
        Description of the source of the transform (path or label).
    """
    types = (Transform, 'path-like')
    if allow_none:
        types += (None,)
    _validate_type(trans, types, 'trans')
    if _path_like(trans):
        trans = str(trans)
        # special-case: built-in fsaverage transform shipped with the package
        if trans == 'fsaverage':
            trans = op.join(op.dirname(__file__), 'data', 'fsaverage',
                            'fsaverage-trans.fif')
        if not op.isfile(trans):
            raise IOError(f'trans file "{trans}" not found')
        if op.splitext(trans)[1] in ['.fif', '.gz']:
            fro_to_t = read_trans(trans)
        else:
            # convert "-trans.txt" to "-trans.fif" mri-type equivalent
            # these are usually actually in to_fro form
            t = np.genfromtxt(trans)
            if t.ndim != 2 or t.shape != (4, 4):
                raise RuntimeError(f'File "{trans}" did not have 4x4 entries')
            fro_to_t = Transform(to, fro, t)
    elif isinstance(trans, Transform):
        fro_to_t = trans
        trans = 'instance of Transform'
    else:
        assert trans is None
        fro_to_t = Transform(fro, to)
        trans = 'identity'
    # it's usually a head->MRI transform, so we probably need to invert it
    fro_to_t = _ensure_trans(fro_to_t, fro, to)
    return fro_to_t, trans
def combine_transforms(t_first, t_second, fro, to):
    """Combine two transforms.

    The frames must chain: ``t_first`` maps ``fro`` to some intermediate
    frame, and ``t_second`` maps that intermediate frame to ``to``.

    Parameters
    ----------
    t_first : dict
        First transform.
    t_second : dict
        Second transform.
    fro : int
        From coordinate frame.
    to : int
        To coordinate frame.

    Returns
    -------
    trans : dict
        Combined transformation.
    """
    fro = _to_const(fro)
    to = _to_const(to)
    # validate the chain: fro -> (t_first) -> intermediate -> (t_second) -> to
    if t_first['from'] != fro:
        raise RuntimeError(
            'From mismatch: {fro1} ("{cf1}") != {fro2} ("{cf2}")'.format(
                fro1=t_first['from'], cf1=_coord_frame_name(t_first['from']),
                fro2=fro, cf2=_coord_frame_name(fro)))
    if t_first['to'] != t_second['from']:
        raise RuntimeError('Transform mismatch: t1["to"] = {to1} ("{cf1}"), '
                           't2["from"] = {fro2} ("{cf2}")'.format(
                               to1=t_first['to'],
                               cf1=_coord_frame_name(t_first['to']),
                               fro2=t_second['from'],
                               cf2=_coord_frame_name(t_second['from'])))
    if t_second['to'] != to:
        raise RuntimeError(
            'To mismatch: {to1} ("{cf1}") != {to2} ("{cf2}")'.format(
                to1=t_second['to'], cf1=_coord_frame_name(t_second['to']),
                to2=to, cf2=_coord_frame_name(to)))
    # matrix product applies t_first first, then t_second
    return Transform(fro, to, np.dot(t_second['trans'], t_first['trans']))
@verbose
def read_trans(fname, return_all=False, verbose=None):
    """Read a -trans.fif file.

    Parameters
    ----------
    fname : str
        The name of the file.
    return_all : bool
        If True, return all transformations in the file.
        False (default) will only return the first.

        .. versionadded:: 0.15
    %(verbose)s

    Returns
    -------
    trans : dict | list of dict
        The transformation dictionary from the fif file.

    See Also
    --------
    write_trans
    mne.transforms.Transform
    """
    fid, tree, directory = fiff_open(fname)
    trans = list()
    with fid:
        # scan the FIF directory for coordinate-transform tags
        for t in directory:
            if t.kind == FIFF.FIFF_COORD_TRANS:
                trans.append(read_tag(fid, t.pos).data)
                if not return_all:
                    break  # first match is enough unless return_all
    if len(trans) == 0:
        raise IOError('This does not seem to be a -trans.fif file.')
    return trans if return_all else trans[0]
def write_trans(fname, trans, *, overwrite=True):
    """Write a -trans.fif file.

    Parameters
    ----------
    fname : str
        The name of the file, which should end in '-trans.fif'.
    trans : dict
        Trans file data, as returned by read_trans.
    overwrite : bool
        If True (default, preserving the previous always-overwrite
        behavior), silently overwrite an existing file. If False, raise
        if the destination already exists. Keyword-only.

    See Also
    --------
    read_trans
    """
    check_fname(fname, 'trans', ('-trans.fif', '-trans.fif.gz',
                                 '_trans.fif', '_trans.fif.gz'))
    # resolves the old inline TODO: overwrite is now caller-controllable
    fname = _check_fname(fname=fname, overwrite=overwrite)
    fid = start_file(fname)
    write_coord_trans(fid, trans)
    end_file(fid)
def invert_transform(trans):
    """Invert a transformation between coordinate systems.

    Parameters
    ----------
    trans : dict
        Transform to invert.

    Returns
    -------
    inv_trans : dict
        Inverse transform: frames swapped, matrix inverted.
    """
    inv_matrix = np.linalg.inv(trans['trans'])
    return Transform(trans['to'], trans['from'], inv_matrix)
def transform_surface_to(surf, dest, trans, copy=False):
    """Transform surface to the desired coordinate system.

    Parameters
    ----------
    surf : dict
        Surface.
    dest : 'meg' | 'mri' | 'head' | int
        Destination coordinate system. Can be an integer for using
        FIFF types.
    trans : dict | list of dict
        Transformation to use (or a list of possible transformations to
        check).
    copy : bool
        If False (default), operate in-place.

    Returns
    -------
    res : dict
        Transformed source space.
    """
    surf = deepcopy(surf) if copy else surf
    if isinstance(dest, str):
        if dest not in _str_to_frame:
            raise KeyError('dest must be one of %s, not "%s"'
                           % (list(_str_to_frame.keys()), dest))
        dest = _str_to_frame[dest]  # convert to integer
    # no-op when already in the requested frame
    if surf['coord_frame'] == dest:
        return surf
    # pick (and orient) the right transform, then move vertices and normals
    trans = _ensure_trans(trans, int(surf['coord_frame']), dest)
    surf['coord_frame'] = dest
    surf['rr'] = apply_trans(trans, surf['rr'])
    if 'nn' in surf:
        # normals are directions: rotate only, no translation
        surf['nn'] = apply_trans(trans, surf['nn'], move=False)
    return surf
def get_ras_to_neuromag_trans(nasion, lpa, rpa):
    """Construct a transformation matrix to the MNE head coordinate system.

    Construct a transformation matrix from an arbitrary RAS coordinate system
    to the MNE head coordinate system, in which the x axis passes through the
    two preauricular points, and the y axis passes through the nasion and is
    normal to the x axis. (see mne manual, pg. 97)

    Parameters
    ----------
    nasion : array_like, shape (3,)
        Nasion point coordinate.
    lpa : array_like, shape (3,)
        Left peri-auricular point coordinate.
    rpa : array_like, shape (3,)
        Right peri-auricular point coordinate.

    Returns
    -------
    trans : numpy.array, shape = (4, 4)
        Transformation matrix to MNE head space.
    """
    # validate: each fiducial must be a 1D array of length 3
    nasion, lpa, rpa = (np.asarray(p) for p in (nasion, lpa, rpa))
    for pt in (nasion, lpa, rpa):
        if pt.ndim != 1 or len(pt) != 3:
            raise ValueError("Points have to be provided as one dimensional "
                             "arrays of length 3.")
    # x axis: LPA -> RPA direction
    right = rpa - lpa
    right_unit = right / np.linalg.norm(right)
    # origin: projection of the nasion onto the LPA-RPA line
    origin = lpa + np.dot(nasion - lpa, right_unit) * right_unit
    # y axis: origin -> nasion direction
    anterior = nasion - origin
    anterior_unit = anterior / np.linalg.norm(anterior)
    # z axis completes the right-handed basis
    superior_unit = np.cross(right_unit, anterior_unit)
    # translate to the new origin ...
    origin_trans = np.eye(4)
    origin_trans[:3, 3] = -origin
    # ... then rotate into the new basis (rows are the new axes)
    rot_trans = np.zeros((4, 4))
    rot_trans[0, :3] = right_unit
    rot_trans[1, :3] = anterior_unit
    rot_trans[2, :3] = superior_unit
    rot_trans[3, 3] = 1.
    return np.dot(rot_trans, origin_trans)
def _get_transforms_to_coord_frame(info, trans, coord_frame='mri'):
    """Get the transforms to a coordinate frame from device, head and mri.

    Parameters
    ----------
    info : dict
        Measurement info providing ``info['dev_head_t']``.
    trans : Transform | path-like | None
        The head<->MRI transform (as accepted by ``_get_trans``).
    coord_frame : str
        Target frame ('meg', 'head' or 'mri').

    Returns
    -------
    to_cf_t : dict
        Mapping of source frame name ('meg', 'head', 'mri') to the
        Transform into ``coord_frame``.
    """
    head_mri_t = _get_trans(trans, 'head', 'mri')[0]
    dev_head_t = _get_trans(info['dev_head_t'], 'meg', 'head')[0]
    # meg -> mri via head, then inverted to get mri -> meg
    mri_dev_t = invert_transform(combine_transforms(
        dev_head_t, head_mri_t, 'meg', 'mri'))
    # for each source frame, offer all candidates (including identity) and
    # let _ensure_trans pick/orient the one reaching coord_frame
    to_cf_t = dict(
        meg=_ensure_trans([dev_head_t, mri_dev_t, Transform('meg', 'meg')],
                          fro='meg', to=coord_frame),
        head=_ensure_trans([dev_head_t, head_mri_t, Transform('head', 'head')],
                           fro='head', to=coord_frame),
        mri=_ensure_trans([head_mri_t, mri_dev_t, Transform('mri', 'mri')],
                          fro='mri', to=coord_frame))
    return to_cf_t
###############################################################################
# Spherical coordinates and harmonics
def _cart_to_sph(cart):
"""Convert Cartesian coordinates to spherical coordinates.
Parameters
----------
cart_pts : ndarray, shape (n_points, 3)
Array containing points in Cartesian coordinates (x, y, z)
Returns
-------
sph_pts : ndarray, shape (n_points, 3)
Array containing points in spherical coordinates (rad, azimuth, polar)
"""
assert cart.ndim == 2 and cart.shape[1] == 3
cart = np.atleast_2d(cart)
out = np.empty((len(cart), 3))
out[:, 0] = np.sqrt(np.sum(cart * cart, axis=1))
norm = np.where(out[:, 0] > 0, out[:, 0], 1) # protect against / 0
out[:, 1] = np.arctan2(cart[:, 1], cart[:, 0])
out[:, 2] = np.arccos(cart[:, 2] / norm)
out = np.nan_to_num(out)
return out
def _sph_to_cart(sph_pts):
"""Convert spherical coordinates to Cartesion coordinates.
Parameters
----------
sph_pts : ndarray, shape (n_points, 3)
Array containing points in spherical coordinates (rad, azimuth, polar)
Returns
-------
cart_pts : ndarray, shape (n_points, 3)
Array containing points in Cartesian coordinates (x, y, z)
"""
assert sph_pts.ndim == 2 and sph_pts.shape[1] == 3
sph_pts = np.atleast_2d(sph_pts)
cart_pts = np.empty((len(sph_pts), 3))
cart_pts[:, 2] = sph_pts[:, 0] * np.cos(sph_pts[:, 2])
xy = sph_pts[:, 0] * np.sin(sph_pts[:, 2])
cart_pts[:, 0] = xy * np.cos(sph_pts[:, 1])
cart_pts[:, 1] = xy * np.sin(sph_pts[:, 1])
return cart_pts
def _get_n_moments(order):
"""Compute the number of multipolar moments (spherical harmonics).
Equivalent to :footcite:`DarvasEtAl2006` Eq. 32.
.. note:: This count excludes ``degree=0`` (for ``order=0``).
Parameters
----------
order : array-like
Expansion orders, often ``[int_order, ext_order]``.
Returns
-------
M : ndarray
Number of moments due to each order.
"""
order = np.asarray(order, int)
return (order + 2) * order
def _sph_to_cart_partials(az, pol, g_rad, g_az, g_pol):
"""Convert spherical partial derivatives to cartesian coords.
Note: Because we are dealing with partial derivatives, this calculation is
not a static transformation. The transformation matrix itself is dependent
on azimuth and polar coord.
See the 'Spherical coordinate sytem' section here:
wikipedia.org/wiki/Vector_fields_in_cylindrical_and_spherical_coordinates
Parameters
----------
az : ndarray, shape (n_points,)
Array containing spherical coordinates points (azimuth).
pol : ndarray, shape (n_points,)
Array containing spherical coordinates points (polar).
sph_grads : ndarray, shape (n_points, 3)
Array containing partial derivatives at each spherical coordinate
(radius, azimuth, polar).
Returns
-------
cart_grads : ndarray, shape (n_points, 3)
Array containing partial derivatives in Cartesian coordinates (x, y, z)
"""
sph_grads = np.c_[g_rad, g_az, g_pol]
c_as, s_as = np.cos(az), np.sin(az)
c_ps, s_ps = np.cos(pol), np.sin(pol)
trans = np.array([[c_as * s_ps, -s_as, c_as * c_ps],
[s_as * s_ps, c_as, c_ps * s_as],
[c_ps, np.zeros_like(c_as), -s_ps]])
cart_grads = np.einsum('ijk,kj->ki', trans, sph_grads)
return cart_grads
def _deg_ord_idx(deg, order):
"""Get the index into S_in or S_out given a degree and order."""
# The -1 here is because we typically exclude the degree=0 term
return deg * deg + deg + order - 1
def _sh_negate(sh, order):
"""Get the negative spherical harmonic from a positive one."""
assert order >= 0
return sh.conj() * (-1. if order % 2 else 1.) # == (-1) ** order
def _sh_complex_to_real(sh, order):
"""Convert complex to real basis functions.
Parameters
----------
sh : array-like
Spherical harmonics. Must be from order >=0 even if negative orders
are used.
order : int
Order (usually 'm') of multipolar moment.
Returns
-------
real_sh : array-like
The real version of the spherical harmonics.
Notes
-----
This does not include the Condon-Shortely phase.
"""
if order == 0:
return np.real(sh)
else:
return np.sqrt(2.) * (np.real if order > 0 else np.imag)(sh)
def _sh_real_to_complex(shs, order):
"""Convert real spherical harmonic pair to complex.
Parameters
----------
shs : ndarray, shape (2, ...)
The real spherical harmonics at ``[order, -order]``.
order : int
Order (usually 'm') of multipolar moment.
Returns
-------
sh : array-like, shape (...)
The complex version of the spherical harmonics.
"""
if order == 0:
return shs[0]
else:
return (shs[0] + 1j * np.sign(order) * shs[1]) / np.sqrt(2.)
def _compute_sph_harm(order, az, pol):
    """Compute real spherical harmonics of spherical coordinates."""
    from scipy.special import sph_harm
    n_out = _get_n_moments(order) + 1
    out = np.empty((len(az), n_out))
    # _deg_ord_idx(0, 0) == -1, so the degree-0 term lands (harmlessly) in
    # the extra last column
    for deg in range(order + 1):
        for m in range(deg + 1):
            sph = sph_harm(m, deg, az, pol)
            out[:, _deg_ord_idx(deg, m)] = _sh_complex_to_real(sph, m)
            if m > 0:
                # fill the matching negative-order column
                out[:, _deg_ord_idx(deg, -m)] = _sh_complex_to_real(
                    _sh_negate(sph, m), -m)
    return out
###############################################################################
# Thin-plate spline transformations
# Adapted from code from the MATLAB file exchange:
# https://www.mathworks.com/matlabcentral/fileexchange/
# 53867-3d-point-set-warping-by-thin-plate-rbf-function
# https://www.mathworks.com/matlabcentral/fileexchange/
# 53828-rbf-or-thin-plate-splines-image-warping
# Associated (BSD 2-clause) license:
#
# Copyright (c) 2015, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class _TPSWarp(object):
    """Transform points using thin-plate spline (TPS) warping.

    Notes
    -----
    Based on the method by :footcite:`Bookstein1989` and
    adapted from code by <NAME> (<EMAIL>>).

    References
    ----------
    .. footbibliography::
    """

    def fit(self, source, destination, reg=1e-3):
        # Solve the regularized TPS linear system L @ w = Y for the warp
        # weights; source/destination are matched (n, 3) point sets.
        from scipy import linalg
        from scipy.spatial.distance import cdist
        assert source.shape[1] == destination.shape[1] == 3
        assert source.shape[0] == destination.shape[0]
        # Forward warping, different from image warping, use |dist|**2
        dists = _tps(cdist(source, destination, 'sqeuclidean'))
        # Y = L * w
        # L: RBF matrix about source
        # Y: Points matrix about destination
        P = np.concatenate((np.ones((source.shape[0], 1)), source), axis=-1)
        L = np.vstack([np.hstack([dists, P]),
                       np.hstack([P.T, np.zeros((4, 4))])])
        Y = np.concatenate((destination, np.zeros((4, 3))), axis=0)
        # Regularize it a bit
        L += reg * np.eye(L.shape[0])
        self._destination = destination.copy()
        # least-squares solve for the TPS weights (affine part included)
        self._weights = linalg.lstsq(L, Y)[0]
        return self

    @verbose
    def transform(self, pts, verbose=None):
        """Apply the warp.

        Parameters
        ----------
        pts : shape (n_transform, 3)
            Source points to warp to the destination.

        Returns
        -------
        dest : shape (n_transform, 3)
            The transformed points.
        """
        logger.info('Transforming %s points' % (len(pts),))
        from scipy.spatial.distance import cdist
        assert pts.shape[1] == 3
        # for memory reasons, we should do this in ~100 MB chunks
        out = np.zeros_like(pts)
        n_splits = max(int((pts.shape[0] * self._destination.shape[0]) /
                           (100e6 / 8.)), 1)
        for this_out, this_pts in zip(np.array_split(out, n_splits),
                                      np.array_split(pts, n_splits)):
            # evaluate RBF kernel + affine terms against the fitted weights
            dists = _tps(cdist(this_pts, self._destination, 'sqeuclidean'))
            L = np.hstack((dists, np.ones((dists.shape[0], 1)), this_pts))
            this_out[:] = np.dot(L, self._weights)
        # NOTE(review): an exactly-zero output coordinate would trip this
        # sanity check even though it can be legitimate — confirm intent
        assert not (out == 0).any()
        return out
def _tps(distsq):
"""Thin-plate function (r ** 2) * np.log(r)."""
# NOTE: For our warping functions, a radial basis like
# exp(-distsq / radius ** 2) could also be used
out = np.zeros_like(distsq)
mask = distsq > 0 # avoid log(0)
valid = distsq[mask]
out[mask] = valid * np.log(valid)
return out
###############################################################################
# Spherical harmonic approximation + TPS warp
class _SphericalSurfaceWarp(object):
    """Warp surfaces via spherical harmonic smoothing and thin-plate splines.

    Notes
    -----
    This class can be used to warp data from a source subject to
    a destination subject, as described in :footcite:`DarvasEtAl2006`.

    The procedure is:

    1. Perform a spherical harmonic approximation to the source and
       destination surfaces, which smooths them and allows arbitrary
       interpolation.
    2. Choose a set of matched points on the two surfaces.
    3. Use thin-plate spline warping (common in 2D image manipulation)
       to generate transformation coefficients.
    4. Warp points from the source subject (which should be inside the
       original surface) to the destination subject.

    .. versionadded:: 0.14

    References
    ----------
    .. footbibliography::
    """

    def __repr__(self):
        # summarize the fit parameters once fit() has been run
        rep = '<SphericalSurfaceWarp : '
        if not hasattr(self, '_warp'):
            rep += 'no fitting done >'
        else:
            rep += ('fit %d->%d pts using match=%s (%d pts), order=%s, reg=%s>'
                    % tuple(self._fit_params[key]
                            for key in ['n_src', 'n_dest', 'match', 'n_match',
                                        'order', 'reg']))
        return rep

    @verbose
    def fit(self, source, destination, order=4, reg=1e-5, center=True,
            match='oct5', verbose=None):
        """Fit the warp from source points to destination points.

        Parameters
        ----------
        source : array, shape (n_src, 3)
            The source points.
        destination : array, shape (n_dest, 3)
            The destination points.
        order : int
            Order of the spherical harmonic fit.
        reg : float
            Regularization of the TPS warp.
        center : bool
            If True, center the points by fitting a sphere to points
            that are in a reasonable region for head digitization.
        match : str
            The uniformly-spaced points to match on the two surfaces.
            Can be "ico#" or "oct#" where "#" is an integer.
            The default is "oct5".
        %(verbose)s

        Returns
        -------
        inst : instance of SphericalSurfaceWarp
            The warping object (for chaining).
        """
        from scipy import linalg
        from .bem import _fit_sphere
        from .source_space import _check_spacing
        match_rr = _check_spacing(match, verbose=False)[2]['rr']
        logger.info('Computing TPS warp')
        src_center = dest_center = np.zeros(3)
        if center:
            logger.info('    Centering data')
            # keep only points in a plausible digitization region before
            # sphere-fitting (drop low-posterior points)
            hsp = np.array([p for p in source
                            if not (p[2] < -1e-6 and p[1] > 1e-6)])
            src_center = _fit_sphere(hsp, disp=False)[1]
            source = source - src_center
            # NOTE(review): the destination filter uses exact-0 thresholds
            # while the source filter above uses 1e-6 margins — confirm the
            # asymmetry is intentional
            hsp = np.array([p for p in destination
                            if not (p[2] < 0 and p[1] > 0)])
            dest_center = _fit_sphere(hsp, disp=False)[1]
            destination = destination - dest_center
            logger.info('    Using centers %s -> %s'
                        % (np.array_str(src_center, None, 3),
                           np.array_str(dest_center, None, 3)))
        self._fit_params = dict(
            n_src=len(source), n_dest=len(destination), match=match,
            n_match=len(match_rr), order=order, reg=reg)
        assert source.shape[1] == destination.shape[1] == 3
        self._destination = destination.copy()
        # 1. Compute spherical coordinates of source and destination points
        logger.info('    Converting to spherical coordinates')
        src_rad_az_pol = _cart_to_sph(source).T
        dest_rad_az_pol = _cart_to_sph(destination).T
        match_rad_az_pol = _cart_to_sph(match_rr).T
        del match_rr
        # 2. Compute spherical harmonic coefficients for all points
        logger.info('    Computing spherical harmonic approximation with '
                    'order %s' % order)
        src_sph = _compute_sph_harm(order, *src_rad_az_pol[1:])
        dest_sph = _compute_sph_harm(order, *dest_rad_az_pol[1:])
        match_sph = _compute_sph_harm(order, *match_rad_az_pol[1:])
        # 3. Fit spherical harmonics to both surfaces to smooth them
        src_coeffs = linalg.lstsq(src_sph, src_rad_az_pol[0])[0]
        dest_coeffs = linalg.lstsq(dest_sph, dest_rad_az_pol[0])[0]
        # 4. Smooth both surfaces using these coefficients, and evaluate at
        #    the "shape" points
        logger.info('    Matching %d points (%s) on smoothed surfaces'
                    % (len(match_sph), match))
        src_rad_az_pol = match_rad_az_pol.copy()
        src_rad_az_pol[0] = np.abs(np.dot(match_sph, src_coeffs))
        dest_rad_az_pol = match_rad_az_pol.copy()
        dest_rad_az_pol[0] = np.abs(np.dot(match_sph, dest_coeffs))
        # 5. Convert matched points to Cartesian coordinates and put back
        source = _sph_to_cart(src_rad_az_pol.T)
        source += src_center
        destination = _sph_to_cart(dest_rad_az_pol.T)
        destination += dest_center
        # 6. Compute TPS warp of matched points from smoothed surfaces
        self._warp = _TPSWarp().fit(source, destination, reg)
        self._matched = np.array([source, destination])
        logger.info('[done]')
        return self

    @verbose
    def transform(self, source, verbose=None):
        """Transform arbitrary source points to the destination.

        Parameters
        ----------
        source : ndarray, shape (n_pts, 3)
            Source points to transform. They do not need to be the same
            points that were used to generate the model, although ideally
            they will be inside the convex hull formed by the original
            source points.
        %(verbose)s

        Returns
        -------
        destination : ndarray, shape (n_pts, 3)
            The points transformed to the destination space.
        """
        # delegate entirely to the fitted TPS warp
        return self._warp.transform(source)
###############################################################################
# Other transforms
def _pol_to_cart(pol):
"""Transform polar coordinates to cartesian."""
out = np.empty((len(pol), 2))
if pol.shape[1] == 2: # phi, theta
out[:, 0] = pol[:, 0] * np.cos(pol[:, 1])
out[:, 1] = pol[:, 0] * np.sin(pol[:, 1])
else: # radial distance, theta, phi
d = pol[:, 0] * np.sin(pol[:, 2])
out[:, 0] = d * np.cos(pol[:, 1])
out[:, 1] = d * np.sin(pol[:, 1])
return out
def _topo_to_sph(topo):
"""Convert 2D topo coordinates to spherical coordinates."""
assert topo.ndim == 2 and topo.shape[1] == 2
sph = np.ones((len(topo), 3))
sph[:, 1] = -np.deg2rad(topo[:, 0])
sph[:, 2] = np.pi * topo[:, 1]
return sph
###############################################################################
# Quaternions
@jit()
def quat_to_rot(quat):
    """Convert a set of quaternions to rotations.

    Parameters
    ----------
    quat : array, shape (..., 3)
        The q1, q2, and q3 (x, y, z) parameters of a unit quaternion.

    Returns
    -------
    rot : array, shape (..., 3, 3)
        The corresponding rotation matrices.

    See Also
    --------
    rot_to_quat
    """
    # Unit quaternion w + x*i + y*j + z*k; recover w from the unit-norm
    # constraint w**2 = 1 - x**2 - y**2 - z**2 (clipped at 0 for roundoff).
    x, y, z = quat[..., 0], quat[..., 1], quat[..., 2]
    xx, yy, zz = x * x, y * y, z * z
    ww = np.maximum(1. - xx - yy - zz, 0.)
    w = np.sqrt(ww)
    wx2 = 2 * w * x
    wy2 = 2 * w * y
    wz2 = 2 * w * z
    xy2 = 2 * x * y
    xz2 = 2 * x * z
    yz2 = 2 * y * z
    rot = np.empty(quat.shape[:-1] + (3, 3))
    rot[..., 0, 0] = ww + xx - yy - zz
    rot[..., 0, 1] = xy2 - wz2
    rot[..., 0, 2] = xz2 + wy2
    rot[..., 1, 0] = xy2 + wz2
    rot[..., 1, 1] = ww + yy - xx - zz
    rot[..., 1, 2] = yz2 - wx2
    rot[..., 2, 0] = xz2 - wy2
    rot[..., 2, 1] = yz2 + wx2
    rot[..., 2, 2] = ww + zz - xx - yy
    return rot
@jit()
def _one_rot_to_quat(rot):
    """Convert a rotation matrix to quaternions."""
    # see e.g. http://www.euclideanspace.com/maths/geometry/rotations/
    # conversions/matrixToQuaternion/
    # ``rot`` is a flattened 3x3 rotation matrix (9 elements, row-major).
    det = np.linalg.det(np.reshape(rot, (3, 3)))
    if np.abs(det - 1.) > 1e-3:
        raise ValueError('Matrix is not a pure rotation, got determinant != 1')
    # t is trace + 1; the branch below picks whichever of qw/qx/qy/qz is
    # largest so that the division by s is numerically stable.
    t = 1. + rot[0] + rot[4] + rot[8]
    if t > np.finfo(rot.dtype).eps:
        # trace dominates: solve via the (implicit) real part qw
        s = np.sqrt(t) * 2.
        # qw = 0.25 * s
        qx = (rot[7] - rot[5]) / s
        qy = (rot[2] - rot[6]) / s
        qz = (rot[3] - rot[1]) / s
    elif rot[0] > rot[4] and rot[0] > rot[8]:
        # rot[0, 0] dominates the diagonal: solve via qx
        s = np.sqrt(1. + rot[0] - rot[4] - rot[8]) * 2.
        # qw = (rot[7] - rot[5]) / s
        qx = 0.25 * s
        qy = (rot[1] + rot[3]) / s
        qz = (rot[2] + rot[6]) / s
    elif rot[4] > rot[8]:
        # rot[1, 1] dominates: solve via qy
        s = np.sqrt(1. - rot[0] + rot[4] - rot[8]) * 2
        # qw = (rot[2] - rot[6]) / s
        qx = (rot[1] + rot[3]) / s
        qy = 0.25 * s
        qz = (rot[5] + rot[7]) / s
    else:
        # rot[2, 2] dominates: solve via qz
        s = np.sqrt(1. - rot[0] - rot[4] + rot[8]) * 2.
        # qw = (rot[3] - rot[1]) / s
        qx = (rot[2] + rot[6]) / s
        qy = (rot[5] + rot[7]) / s
        qz = 0.25 * s
    # only the vector part (qx, qy, qz) is returned; qw is implied >= 0
    return np.array((qx, qy, qz))
def rot_to_quat(rot):
    """Convert a set of rotations to quaternions.

    Parameters
    ----------
    rot : array, shape (..., 3, 3)
        The rotation matrices to convert.

    Returns
    -------
    quat : array, shape (..., 3)
        The q1, q2, and q3 (x, y, z) parameters of the corresponding
        unit quaternions.

    See Also
    --------
    quat_to_rot
    """
    # Flatten each trailing 3x3 matrix into 9 values, then convert each.
    flat = np.reshape(rot, rot.shape[:-2] + (9,))
    return np.apply_along_axis(_one_rot_to_quat, -1, flat)
def _quat_to_affine(quat):
    """Build a 4x4 affine from a 6-element quat (rotation + translation)."""
    assert quat.shape == (6,)
    out = np.eye(4)
    out[:3, :3] = quat_to_rot(quat[:3])  # first three: rotation quaternion
    out[:3, 3] = quat[3:]  # last three: translation vector
    return out
def _angle_between_quats(x, y):
    """Compute the ang between two quaternions w/3-element representations."""
    # z = conj(x) * y; conjugating a quaternion negates all but the real
    # part, and we only store the vector part, so conj(x) is just -x here.
    prod = _quat_mult(-x, y)
    real_part = _quat_real(prod)
    return 2 * np.arctan2(np.linalg.norm(prod, axis=-1), real_part)
def _quat_real(quat):
"""Get the real part of our 3-element quat."""
assert quat.shape[-1] == 3, quat.shape[-1]
return np.sqrt(np.maximum(1. -
quat[..., 0] * quat[..., 0] -
quat[..., 1] * quat[..., 1] -
quat[..., 2] * quat[..., 2], 0.))
def _quat_mult(one, two):
    """Multiply two 3-element unit quaternions (Hamilton product)."""
    assert one.shape[-1] == two.shape[-1] == 3
    # Reconstruct the (non-negative) real parts of both quaternions.
    ra = _quat_real(one)
    rb = _quat_real(two)
    ax, ay, az = one[..., 0], one[..., 1], one[..., 2]
    bx, by, bz = two[..., 0], two[..., 1], two[..., 2]
    prod = np.empty(np.broadcast(one, two).shape)
    prod[..., 0] = ra * bx + ax * rb + ay * bz - az * by
    prod[..., 1] = ra * by - ax * bz + ay * rb + az * bx
    prod[..., 2] = ra * bz + ax * by - ay * bx + az * rb
    # Compute the real part only for its sign: our convention stores a
    # quaternion with non-negative real part, so flip where negative.
    real = ra * rb - ax * bx - ay * by - az * bz
    flips = np.sign(real)
    flips = np.where(flips, flips, 1)
    prod *= flips[..., np.newaxis]
    return prod
def _skew_symmetric_cross(a):
"""Compute the skew-symmetric cross product of a vector."""
return np.array([[0., -a[2], a[1]], [a[2], 0., -a[0]], [-a[1], a[0], 0.]])
def _find_vector_rotation(a, b):
"""Find the rotation matrix that maps unit vector a to b."""
# Rodrigues' rotation formula:
# https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
# http://math.stackexchange.com/a/476311
R = np.eye(3)
v = np.cross(a, b)
if np.allclose(v, 0.): # identical
return R
s = np.dot(v, v) # sine of the angle between them
c = np.dot(a, b) # cosine of the angle between them
vx = _skew_symmetric_cross(v)
R += vx + np.dot(vx, vx) * (1 - c) / s
return R
@jit()
def _fit_matched_points(p, x, weights=None, scale=False):
    """Fit matched points using an analytical formula."""
    # Follow notation of <NAME> and <NAME>, A Method for
    # Registration of 3-D Shapes, IEEE Trans. Patt. Anal. Machine Intell., 14,
    # 239 - 255, 1992.
    #
    # The original method is actually by Horn, Closed-form solution of absolute
    # orientation using unit quaternions, J Opt. Soc. Amer. A vol 4 no 4
    # pp 629-642, Apr. 1987. This paper describes how weights can be
    # easily incorporated, and a uniform scale factor can be computed.
    #
    # Caution: This can be dangerous if there are 3 points, or 4 points in
    #          a symmetric layout, as the geometry can be explained
    #          equivalently under 180 degree rotations.
    #
    # Eventually this can be extended to also handle a uniform scale factor,
    # as well.
    assert p.shape == x.shape
    assert p.ndim == 2
    assert p.shape[1] == 3
    # (weighted) centroids
    if weights is None:
        # NOTE(review): bare ``mean`` — presumably numpy's mean brought into
        # module scope (e.g. for numba compatibility); verify at file top.
        mu_p = mean(p, axis=0)  # eq 23
        mu_x = mean(x, axis=0)
        dots = np.dot(p.T, x)
        dots /= p.shape[0]
    else:
        # normalize weights to sum to 1 and use weighted centroids/dots
        weights_ = np.reshape(weights / weights.sum(), (weights.size, 1))
        mu_p = np.dot(weights_.T, p)[0]
        mu_x = np.dot(weights_.T, x)[0]
        dots = np.dot(p.T, weights_ * x)
    Sigma_px = dots - np.outer(mu_p, mu_x)  # eq 24 (cross-covariance)
    # x and p should no longer be used
    A_ij = Sigma_px - Sigma_px.T
    Delta = np.array([A_ij[1, 2], A_ij[2, 0], A_ij[0, 1]])
    tr_Sigma_px = np.trace(Sigma_px)
    # "N" in Horn: the symmetric 4x4 matrix whose largest eigenvector is
    # the optimal unit quaternion
    Q = np.empty((4, 4))
    Q[0, 0] = tr_Sigma_px
    Q[0, 1:] = Delta
    Q[1:, 0] = Delta
    Q[1:, 1:] = Sigma_px + Sigma_px.T - tr_Sigma_px * np.eye(3)
    _, v = np.linalg.eigh(Q)  # sorted ascending
    quat = np.empty(6)
    quat[:3] = v[1:, -1]  # vector part of the largest eigenvector
    if v[0, -1] != 0:
        # flip sign so the (dropped) real part is non-negative
        quat[:3] *= np.sign(v[0, -1])
    rot = quat_to_rot(quat[:3])
    # scale factor is easy once we know the rotation
    if scale:  # p is "right" (from), x is "left" (to) in Horn 1987
        dev_x = x - mu_x
        dev_p = p - mu_p
        dev_x *= dev_x
        dev_p *= dev_p
        if weights is not None:
            dev_x *= weights_
            dev_p *= weights_
        s = np.sqrt(np.sum(dev_x) / np.sum(dev_p))
    else:
        s = 1.
    # translation is easy once rotation and scale are known
    quat[3:] = mu_x - s * np.dot(rot, mu_p)
    return quat, s
def _average_quats(quats, weights=None):
    """Average unit quaternions properly."""
    from scipy import linalg
    assert quats.ndim == 2 and quats.shape[1] in (3, 4)
    if weights is None:
        weights = np.ones(quats.shape[0])
    assert (weights >= 0).all()
    norm = weights.sum()
    if norm == 0:  # all-zero weights: nothing to average
        return np.zeros(3)
    weights = weights / norm
    # A naive weighted mean np.dot(weights, quats[:, :3]) is not robust to
    # the sign ambiguity q == -q.  Use the rank-1 update method instead:
    #
    # https://arc.aiaa.org/doi/abs/10.2514/1.28949?journalCode=jgcd
    # https://github.com/tolgabirdal/averaging_quaternions/blob/master/wavg_quaternion_markley.m  # noqa: E501
    #
    # We store unit quats without the real element, so rebuild the full
    # 4-element quaternions first:
    full = np.concatenate((_quat_real(quats)[..., np.newaxis], quats), -1)
    full *= weights[:, np.newaxis]
    # Sum of outer products of each quaternion; its largest eigenvector is
    # the average (same as the top right-singular vector of the stack, but
    # faster than an SVD).
    acc = np.einsum('ij,ik->jk', full, full)
    avg_quat = linalg.eigh(acc)[1][:, -1]
    # By local convention the (dropped) real term is taken positive.  Since
    # it can be zero, ensure the first non-zero element is positive — this
    # is irrelevant once converted to a rotation matrix, but keeps tests
    # deterministic.
    avg_quat *= np.sign(avg_quat[avg_quat != 0][0])
    return avg_quat[1:]
@fill_doc
def read_ras_mni_t(subject, subjects_dir=None):
    """Read a subject's RAS to MNI transform.

    Parameters
    ----------
    subject : str
        The subject.
    %(subjects_dir)s

    Returns
    -------
    ras_mni_t : instance of Transform
        The transform from RAS to MNI (in mm).
    """
    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir,
                                    raise_error=True)
    _validate_type(subject, 'str', 'subject')
    # FreeSurfer stores the RAS->MNI mapping in mri/transforms/talairach.xfm
    xfm_fname = _check_fname(
        op.join(subjects_dir, subject, 'mri', 'transforms', 'talairach.xfm'),
        'read', True, 'FreeSurfer Talairach transformation file')
    matrix, _ = _read_fs_xfm(xfm_fname)
    return Transform('ras', 'mni_tal', matrix)
def _read_fs_xfm(fname):
    """Read a Freesurfer transform from a .xfm file."""
    assert fname.endswith('.xfm')
    with open(fname, 'r') as fid:
        logger.debug('Reading FreeSurfer talairach.xfm file:\n%s' % fname)
        # The first line names the transform kind; then scan forward to the
        # 'Linear_Transform' marker that precedes the matrix data.
        marker = 'Linear_Transform'
        found = False
        for line_idx, line in enumerate(fid):
            if line_idx == 0:
                kind = line.strip()
                logger.debug('Found: %r' % (kind,))
            if line.startswith(marker):
                found = True
                break
        if not found:
            raise ValueError('Failed to find "Linear_Transform" string in '
                             'xfm file:\n%s' % fname)
        # read the transformation matrix (3x4)
        rows = list()
        for row_idx, line in enumerate(fid):
            rows.append([float(s) for s in line.strip('\n;').split()])
            if row_idx == 2:
                break
        else:
            raise ValueError('Could not find enough linear transform lines')
    # the file omits the trivial last affine row
    rows.append([0., 0., 0., 1.])
    return np.array(rows, dtype=float), kind
def _write_fs_xfm(fname, xfm, kind):
"""Write a Freesurfer transform to a .xfm file."""
with open(fname, 'wb') as fid:
fid.write((kind + '\n\nTtransform_Type = Linear;\n').encode('ascii'))
fid.write(u'Linear_Transform =\n'.encode('ascii'))
for li, line in enumerate(xfm[:-1]):
line = ' '.join(['%0.6f' % part for part in line])
line += '\n' if li < 2 else ';\n'
fid.write(line.encode('ascii'))
def _quat_to_euler(quat):
    """Convert 3-element quaternions to (x, y, z) Euler angles."""
    x, y, z = quat[..., 0], quat[..., 1], quat[..., 2]
    w = _quat_real(quat)  # recover the implicit real component
    euler = np.empty(quat.shape)
    euler[..., 0] = np.arctan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y))
    euler[..., 1] = np.arcsin(2 * (w * y - x * z))
    euler[..., 2] = np.arctan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z))
    return euler
def _euler_to_quat(euler):
quat = np.empty(euler.shape)
phi, theta, psi = euler[..., 0] / 2, euler[..., 1] / 2, euler[..., 2] / 2
cphi, sphi = np.cos(phi), np.sin(phi)
del phi
ctheta, stheta = np.cos(theta), np.sin(theta)
del theta
cpsi, spsi = np.cos(psi), np.sin(psi)
del psi
mult = np.sign(cphi * ctheta * cpsi + sphi * stheta * spsi)
if np.isscalar(mult):
mult = 1. if mult == 0 else mult
else:
mult[mult == 0] = 1.
mult = mult[..., np.newaxis]
quat[..., 0] = sphi * ctheta * cpsi - cphi * stheta * spsi
quat[..., 1] = cphi * stheta * cpsi + sphi * ctheta * spsi
quat[..., 2] = cphi * ctheta * spsi - sphi * stheta * cpsi
quat *= mult
return quat
###############################################################################
# Affine Registration and SDR

# Canonical ordering of registration steps; pipelines must list steps in
# this order (see _validate_pipeline below).
_ORDERED_STEPS = ('translation', 'rigid', 'affine', 'sdr')
def _validate_zooms(zooms):
    """Validate and normalize the ``zooms`` argument to a per-step dict.

    Each value becomes either None (no reslicing) or a 3-tuple of floats
    greater than 1 (voxel size in mm for each spatial dimension).
    """
    _validate_type(zooms, (dict, list, tuple, 'numeric', None), 'zooms')
    zooms = _handle_default('transform_zooms', zooms)
    for key, val in zooms.items():
        _check_option('zooms key', key, _ORDERED_STEPS)
        if val is not None:
            val = tuple(
                float(x) for x in np.array(val, dtype=float).ravel())
            # bugfix: option name was missing its closing bracket
            # (now matches the message style of _validate_niter)
            _check_option(f'len(zooms[{repr(key)}])', len(val), (1, 3))
            if len(val) == 1:
                val = val * 3  # broadcast an isotropic zoom to all 3 axes
            for this_zoom in val:
                if this_zoom <= 1:
                    raise ValueError(f'Zooms must be > 1, got {this_zoom}')
        zooms[key] = val
    return zooms
def _validate_niter(niter):
    """Validate the per-step iteration counts, filling in defaults."""
    _validate_type(niter, (dict, list, tuple, None), 'niter')
    niter = _handle_default('transform_niter', niter)
    # each entry must name a known step and give 1-3 per-level counts
    for step, counts in niter.items():
        _check_option('niter key', step, _ORDERED_STEPS)
        _check_option(f'len(niter[{repr(step)}])', len(counts), (1, 2, 3))
    return niter
def _validate_pipeline(pipeline):
    """Validate a registration pipeline spec, returning a step tuple."""
    _validate_type(pipeline, (str, list, tuple), 'pipeline')
    # named shorthands expand to prefixes of the canonical step ordering
    presets = dict(
        all=_ORDERED_STEPS,
        rigids=_ORDERED_STEPS[:_ORDERED_STEPS.index('rigid') + 1],
        affines=_ORDERED_STEPS[:_ORDERED_STEPS.index('affine') + 1])
    if isinstance(pipeline, str):  # use defaults
        _check_option('pipeline', pipeline, ('all', 'rigids', 'affines'),
                      extra='when str')
        pipeline = presets[pipeline]
    for idx, step in enumerate(pipeline):
        name = f'pipeline[{idx}]'
        _validate_type(step, str, name)
        _check_option(name, step, _ORDERED_STEPS)
    # steps must appear in canonical order and without repetition
    ordered_pipeline = tuple(sorted(pipeline, key=_ORDERED_STEPS.index))
    if tuple(pipeline) != ordered_pipeline:
        raise ValueError(
            f'Steps in pipeline are out of order, expected {ordered_pipeline} '
            f'but got {pipeline} instead')
    if len(set(pipeline)) != len(pipeline):
        raise ValueError('Steps in pipeline should not be repeated')
    return tuple(pipeline)
def _compute_r2(a, b):
return 100 * (a.ravel() @ b.ravel()) / \
(np.linalg.norm(a) * np.linalg.norm(b))
def _reslice_normalize(img, zooms):
    """Reslice a NiBabel image to ``zooms`` and scale it by its maximum.

    Returns the floating-point data array (resliced onto the requested
    voxel grid when ``zooms`` is not None) and the corresponding affine.
    """
    from dipy.align.reslice import reslice
    img_zooms = img.header.get_zooms()[:3]  # spatial zooms only
    img_affine = img.affine
    img = _get_img_fdata(img)
    if zooms is not None:
        img, img_affine = reslice(img, img_affine, img_zooms, zooms)
    # divide in place so the maximum intensity becomes 1
    img /= img.max()  # normalize
    return img, img_affine
@verbose
def compute_volume_registration(moving, static, pipeline='all', zooms=None,
                                niter=None, verbose=None):
    """Align two volumes using an affine and, optionally, SDR.

    Parameters
    ----------
    %(moving)s
    %(static)s
    %(pipeline)s
    zooms : float | tuple | dict | None
        The voxel size of volume for each spatial dimension in mm.
        If None (default), MRIs won't be resliced (slow, but most accurate).
        Can be a tuple to provide separate zooms for each dimension (X/Y/Z),
        or a dict with keys ``['translation', 'rigid', 'affine', 'sdr']``
        (each with values that are float`, tuple, or None) to provide separate
        reslicing/accuracy for the steps.
    %(niter)s
    %(verbose)s

    Returns
    -------
    %(reg_affine)s
    %(sdr_morph)s

    Notes
    -----
    This function is heavily inspired by and extends
    :func:`dipy.align.affine_registration
    <dipy.align._public.affine_registration>`.

    .. versionadded:: 0.24
    """
    # The private helper also returns the resliced shapes/affines; only
    # the registration parameters are part of the public API.
    reg_affine, sdr_morph = _compute_volume_registration(
        moving, static, pipeline, zooms, niter)[:2]
    return reg_affine, sdr_morph
def _compute_volume_registration(moving, static, pipeline, zooms, niter):
    """Run the affine/SDR registration pipeline, returning intermediates.

    Returns ``(reg_affine, sdr_morph, static_shape, static_affine,
    moving_shape, moving_affine)``, where the shapes and affines describe
    the (possibly resliced) grids used by the last executed step.
    """
    _require_version('nibabel', 'SDR morph', '2.1.0')
    _require_version('dipy', 'SDR morph', '0.10.1')
    import nibabel as nib
    with np.testing.suppress_warnings():
        from dipy.align.imaffine import AffineMap
        from dipy.align import (affine_registration, center_of_mass,
                                translation, rigid, affine,
                                imwarp, metrics)
    # input validation
    _validate_type(moving, nib.spatialimages.SpatialImage, 'moving')
    _validate_type(static, nib.spatialimages.SpatialImage, 'static')
    zooms = _validate_zooms(zooms)
    niter = _validate_niter(niter)
    pipeline = _validate_pipeline(pipeline)
    logger.info('Computing registration...')
    # affine optimizations
    reg_affine = None
    sdr_morph = None
    # dipy transform factories for each affine-family step
    pipeline_options = dict(translation=[center_of_mass, translation],
                            rigid=[rigid], affine=[affine])
    # multi-resolution pyramid: smoothing sigmas and subsampling factors
    sigmas = [3.0, 1.0, 0.0]
    factors = [4, 2, 1]
    for i, step in enumerate(pipeline):
        # reslice image with zooms — only when this step's zooms differ
        # from the previous step's, to avoid redundant interpolation
        if i == 0 or zooms[step] != zooms[pipeline[i - 1]]:
            if zooms[step] is not None:
                logger.info(f'Reslicing to zooms={zooms[step]} for {step} ...')
            else:
                logger.info(f'Using original zooms for {step} ...')
            static_zoomed, static_affine = _reslice_normalize(
                static, zooms[step])
            moving_zoomed, moving_affine = _reslice_normalize(
                moving, zooms[step])
        logger.info(f'Optimizing {step}:')
        if step == 'sdr':  # happens last
            # resample the moving image onto the static grid using the
            # affine found so far; afterwards both volumes live on the
            # static grid, hence static_affine is passed twice below
            affine_map = AffineMap(reg_affine,  # apply registration here
                                   static_zoomed.shape, static_affine,
                                   moving_zoomed.shape, moving_affine)
            moving_zoomed = affine_map.transform(moving_zoomed)
            sdr = imwarp.SymmetricDiffeomorphicRegistration(
                metrics.CCMetric(3), niter[step])
            with wrapped_stdout(indent='    ', cull_newlines=True):
                sdr_morph = sdr.optimize(static_zoomed, moving_zoomed,
                                         static_affine, static_affine)
            moved_zoomed = sdr_morph.transform(moving_zoomed)
        else:
            with wrapped_stdout(indent='    ', cull_newlines=True):
                # each affine step warm-starts from the previous reg_affine
                moved_zoomed, reg_affine = affine_registration(
                    moving_zoomed, static_zoomed, moving_affine, static_affine,
                    nbins=32, metric='MI', pipeline=pipeline_options[step],
                    level_iters=niter[step], sigmas=sigmas, factors=factors,
                    starting_affine=reg_affine)
        # report some useful information
        if step in ('translation', 'rigid'):
            dist = np.linalg.norm(reg_affine[:3, 3])
            angle = np.rad2deg(_angle_between_quats(
                np.zeros(3), rot_to_quat(reg_affine[:3, :3])))
            logger.info(f'    Translation: {dist:6.1f} mm')
            if step == 'rigid':
                logger.info(f'    Rotation: {angle:6.1f}°')
        assert moved_zoomed.shape == static_zoomed.shape, step
        r2 = _compute_r2(static_zoomed, moved_zoomed)
        logger.info(f'    R²: {r2:6.1f}%')
    return (reg_affine, sdr_morph, static_zoomed.shape, static_affine,
            moving_zoomed.shape, moving_affine)
@verbose
def apply_volume_registration(moving, static, reg_affine, sdr_morph=None,
                              interpolation='linear', verbose=None):
    """Apply volume registration.

    Uses registration parameters computed by
    :func:`~mne.transforms.compute_volume_registration`.

    Parameters
    ----------
    %(moving)s
    %(static)s
    %(reg_affine)s
    %(sdr_morph)s
    interpolation : str
        Interpolation to be used during the interpolation.
        Can be "linear" (default) or "nearest".
    %(verbose)s

    Returns
    -------
    reg_img : instance of SpatialImage
        The image after affine (and SDR, if provided) registration.

    Notes
    -----
    .. versionadded:: 0.24
    """
    _require_version('nibabel', 'SDR morph', '2.1.0')
    _require_version('dipy', 'SDR morph', '0.10.1')
    from nibabel.spatialimages import SpatialImage
    from dipy.align.imwarp import DiffeomorphicMap
    from dipy.align.imaffine import AffineMap
    _validate_type(moving, SpatialImage, 'moving')
    _validate_type(static, SpatialImage, 'static')
    _validate_type(reg_affine, np.ndarray, 'reg_affine')
    _check_option('reg_affine.shape', reg_affine.shape, ((4, 4),))
    _validate_type(sdr_morph, (DiffeomorphicMap, None), 'sdr_morph')
    logger.info('Applying affine registration ...')
    moving, moving_affine = np.asarray(moving.dataobj), moving.affine
    static, static_affine = np.asarray(static.dataobj), static.affine
    affine_map = AffineMap(reg_affine,
                           static.shape, static_affine,
                           moving.shape, moving_affine)
    reg_data = affine_map.transform(moving, interpolation=interpolation)
    if sdr_morph is not None:
        # bugfix: log-message typo corrected ("Appling" -> "Applying")
        logger.info('Applying SDR warp ...')
        reg_data = sdr_morph.transform(
            reg_data, interpolation=interpolation,
            image_world2grid=np.linalg.inv(static_affine),
            out_shape=static.shape, out_grid2world=static_affine)
    # the registered image lives on the static grid
    reg_img = SpatialImage(reg_data, static_affine)
    logger.info('[done]')
    return reg_img
| [
"numpy.trace",
"numpy.arctan2",
"numpy.nan_to_num",
"numpy.maximum",
"numpy.sum",
"numpy.abs",
"numpy.empty",
"numpy.allclose",
"dipy.align.reslice.reslice",
"numpy.einsum",
"numpy.ones",
"numpy.array_str",
"os.path.isfile",
"numpy.sin",
"numpy.linalg.norm",
"scipy.linalg.lstsq",
"os... | [((810, 877), 'numpy.array', 'np.array', (['[[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (818, 877), True, 'import numpy as np\n'), ((7951, 7966), 'numpy.asarray', 'np.asarray', (['pts'], {}), '(pts)\n', (7961, 7966), True, 'import numpy as np\n'), ((8059, 8087), 'numpy.dot', 'np.dot', (['pts', 'trans[:3, :3].T'], {}), '(pts, trans[:3, :3].T)\n', (8065, 8087), True, 'import numpy as np\n'), ((8470, 8479), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (8476, 8479), True, 'import numpy as np\n'), ((8492, 8501), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (8498, 8501), True, 'import numpy as np\n'), ((8514, 8523), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (8520, 8523), True, 'import numpy as np\n'), ((8536, 8545), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (8542, 8545), True, 'import numpy as np\n'), ((8558, 8567), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (8564, 8567), True, 'import numpy as np\n'), ((8580, 8589), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (8586, 8589), True, 'import numpy as np\n'), ((8598, 8890), 'numpy.array', 'np.array', (['[[cos_y * cos_z, -cos_x * sin_z + sin_x * sin_y * cos_z, sin_x * sin_z + \n cos_x * sin_y * cos_z, 0], [cos_y * sin_z, cos_x * cos_z + sin_x *\n sin_y * sin_z, -sin_x * cos_z + cos_x * sin_y * sin_z, 0], [-sin_y, \n sin_x * cos_y, cos_x * cos_y, 0], [0, 0, 0, 1]]'], {'dtype': 'float'}), '([[cos_y * cos_z, -cos_x * sin_z + sin_x * sin_y * cos_z, sin_x *\n sin_z + cos_x * sin_y * cos_z, 0], [cos_y * sin_z, cos_x * cos_z + \n sin_x * sin_y * sin_z, -sin_x * cos_z + cos_x * sin_y * sin_z, 0], [-\n sin_y, sin_x * cos_y, cos_x * cos_y, 0], [0, 0, 0, 1]], dtype=float)\n', (8606, 8890), True, 'import numpy as np\n'), ((9278, 9287), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (9284, 9287), True, 'import numpy as np\n'), ((9300, 9309), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (9306, 9309), True, 'import numpy as np\n'), ((9322, 
9331), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (9328, 9331), True, 'import numpy as np\n'), ((9344, 9353), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (9350, 9353), True, 'import numpy as np\n'), ((9366, 9375), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (9372, 9375), True, 'import numpy as np\n'), ((9388, 9397), 'numpy.sin', 'np.sin', (['z'], {}), '(z)\n', (9394, 9397), True, 'import numpy as np\n'), ((9406, 9673), 'numpy.array', 'np.array', (['[[cos_y * cos_z, -cos_x * sin_z + sin_x * sin_y * cos_z, sin_x * sin_z + \n cos_x * sin_y * cos_z], [cos_y * sin_z, cos_x * cos_z + sin_x * sin_y *\n sin_z, -sin_x * cos_z + cos_x * sin_y * sin_z], [-sin_y, sin_x * cos_y,\n cos_x * cos_y]]'], {'dtype': 'float'}), '([[cos_y * cos_z, -cos_x * sin_z + sin_x * sin_y * cos_z, sin_x *\n sin_z + cos_x * sin_y * cos_z], [cos_y * sin_z, cos_x * cos_z + sin_x *\n sin_y * sin_z, -sin_x * cos_z + cos_x * sin_y * sin_z], [-sin_y, sin_x *\n cos_y, cos_x * cos_y]], dtype=float)\n', (9414, 9673), True, 'import numpy as np\n'), ((10181, 10197), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (10189, 10197), True, 'import numpy as np\n'), ((11538, 11566), 'numpy.arctan2', 'np.arctan2', (['m[2, 1]', 'm[2, 2]'], {}), '(m[2, 1], m[2, 2])\n', (11548, 11566), True, 'import numpy as np\n'), ((11576, 11612), 'numpy.sqrt', 'np.sqrt', (['(m[0, 0] ** 2 + m[1, 0] ** 2)'], {}), '(m[0, 0] ** 2 + m[1, 0] ** 2)\n', (11583, 11612), True, 'import numpy as np\n'), ((11621, 11645), 'numpy.arctan2', 'np.arctan2', (['(-m[2, 0])', 'c2'], {}), '(-m[2, 0], c2)\n', (11631, 11645), True, 'import numpy as np\n'), ((11655, 11664), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (11661, 11664), True, 'import numpy as np\n'), ((11674, 11683), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (11680, 11683), True, 'import numpy as np\n'), ((11692, 11760), 'numpy.arctan2', 'np.arctan2', (['(s1 * m[0, 2] - c1 * m[0, 1])', '(c1 * m[1, 1] - s1 * m[1, 2])'], {}), '(s1 * m[0, 2] - c1 * m[0, 1], c1 * m[1, 1] - s1 * 
m[1, 2])\n', (11702, 11760), True, 'import numpy as np\n'), ((12032, 12111), 'numpy.array', 'np.array', (['[[x, 0, 0, 0], [0, y, 0, 0], [0, 0, z, 0], [0, 0, 0, 1]]'], {'dtype': 'float'}), '([[x, 0, 0, 0], [0, y, 0, 0], [0, 0, z, 0], [0, 0, 0, 1]], dtype=float)\n', (12040, 12111), True, 'import numpy as np\n'), ((12450, 12529), 'numpy.array', 'np.array', (['[[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]]'], {'dtype': 'float'}), '([[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]], dtype=float)\n', (12458, 12529), True, 'import numpy as np\n'), ((20609, 20627), 'numpy.asarray', 'np.asarray', (['nasion'], {}), '(nasion)\n', (20619, 20627), True, 'import numpy as np\n'), ((20638, 20653), 'numpy.asarray', 'np.asarray', (['lpa'], {}), '(lpa)\n', (20648, 20653), True, 'import numpy as np\n'), ((20664, 20679), 'numpy.asarray', 'np.asarray', (['rpa'], {}), '(rpa)\n', (20674, 20679), True, 'import numpy as np\n'), ((21130, 21165), 'numpy.cross', 'np.cross', (['right_unit', 'anterior_unit'], {}), '(right_unit, anterior_unit)\n', (21138, 21165), True, 'import numpy as np\n'), ((21208, 21228), 'dipy.align.translation', 'translation', (['x', 'y', 'z'], {}), '(x, y, z)\n', (21219, 21228), False, 'from dipy.align import affine_registration, center_of_mass, translation, rigid, affine, imwarp, metrics\n'), ((21244, 21308), 'numpy.vstack', 'np.vstack', (['(right_unit, anterior_unit, superior_unit, [0, 0, 0])'], {}), '((right_unit, anterior_unit, superior_unit, [0, 0, 0]))\n', (21253, 21308), True, 'import numpy as np\n'), ((21323, 21355), 'numpy.reshape', 'np.reshape', (['[0, 0, 0, 1]', '(4, 1)'], {}), '([0, 0, 0, 1], (4, 1))\n', (21333, 21355), True, 'import numpy as np\n'), ((21372, 21401), 'numpy.hstack', 'np.hstack', (['(trans_l, trans_r)'], {}), '((trans_l, trans_r))\n', (21381, 21401), True, 'import numpy as np\n'), ((21415, 21446), 'numpy.dot', 'np.dot', (['rot_trans', 'origin_trans'], {}), '(rot_trans, origin_trans)\n', (21421, 21446), True, 'import numpy as 
np\n'), ((22831, 22850), 'numpy.atleast_2d', 'np.atleast_2d', (['cart'], {}), '(cart)\n', (22844, 22850), True, 'import numpy as np\n'), ((22950, 22987), 'numpy.where', 'np.where', (['(out[:, 0] > 0)', 'out[:, 0]', '(1)'], {}), '(out[:, 0] > 0, out[:, 0], 1)\n', (22958, 22987), True, 'import numpy as np\n'), ((23027, 23061), 'numpy.arctan2', 'np.arctan2', (['cart[:, 1]', 'cart[:, 0]'], {}), '(cart[:, 1], cart[:, 0])\n', (23037, 23061), True, 'import numpy as np\n'), ((23078, 23106), 'numpy.arccos', 'np.arccos', (['(cart[:, 2] / norm)'], {}), '(cart[:, 2] / norm)\n', (23087, 23106), True, 'import numpy as np\n'), ((23117, 23135), 'numpy.nan_to_num', 'np.nan_to_num', (['out'], {}), '(out)\n', (23130, 23135), True, 'import numpy as np\n'), ((23610, 23632), 'numpy.atleast_2d', 'np.atleast_2d', (['sph_pts'], {}), '(sph_pts)\n', (23623, 23632), True, 'import numpy as np\n'), ((24339, 24361), 'numpy.asarray', 'np.asarray', (['order', 'int'], {}), '(order, int)\n', (24349, 24361), True, 'import numpy as np\n'), ((25699, 25740), 'numpy.einsum', 'np.einsum', (['"""ijk,kj->ki"""', 'trans', 'sph_grads'], {}), "('ijk,kj->ki', trans, sph_grads)\n", (25708, 25740), True, 'import numpy as np\n'), ((32255, 32276), 'numpy.zeros_like', 'np.zeros_like', (['distsq'], {}), '(distsq)\n', (32268, 32276), True, 'import numpy as np\n'), ((40051, 40086), 'numpy.maximum', 'np.maximum', (['(1.0 - bb - cc - dd)', '(0.0)'], {}), '(1.0 - bb - cc - dd, 0.0)\n', (40061, 40086), True, 'import numpy as np\n'), ((40093, 40104), 'numpy.sqrt', 'np.sqrt', (['aa'], {}), '(aa)\n', (40100, 40104), True, 'import numpy as np\n'), ((40246, 40280), 'numpy.empty', 'np.empty', (['(quat.shape[:-1] + (3, 3))'], {}), '(quat.shape[:-1] + (3, 3))\n', (40254, 40280), True, 'import numpy as np\n'), ((41913, 41935), 'numpy.array', 'np.array', (['(qx, qy, qz)'], {}), '((qx, qy, qz))\n', (41921, 41935), True, 'import numpy as np\n'), ((42378, 42424), 'numpy.apply_along_axis', 'np.apply_along_axis', (['_one_rot_to_quat', 
'(-1)', 'rot'], {}), '(_one_rot_to_quat, -1, rot)\n', (42397, 42424), True, 'import numpy as np\n'), ((42497, 42506), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (42503, 42506), True, 'import numpy as np\n'), ((43928, 43938), 'numpy.sign', 'np.sign', (['w'], {}), '(w)\n', (43935, 43938), True, 'import numpy as np\n'), ((43951, 43976), 'numpy.where', 'np.where', (['signs', 'signs', '(1)'], {}), '(signs, signs, 1)\n', (43959, 43976), True, 'import numpy as np\n'), ((44133, 44203), 'numpy.array', 'np.array', (['[[0.0, -a[2], a[1]], [a[2], 0.0, -a[0]], [-a[1], a[0], 0.0]]'], {}), '([[0.0, -a[2], a[1]], [a[2], 0.0, -a[0]], [-a[1], a[0], 0.0]])\n', (44141, 44203), True, 'import numpy as np\n'), ((44459, 44468), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (44465, 44468), True, 'import numpy as np\n'), ((44477, 44491), 'numpy.cross', 'np.cross', (['a', 'b'], {}), '(a, b)\n', (44485, 44491), True, 'import numpy as np\n'), ((44499, 44518), 'numpy.allclose', 'np.allclose', (['v', '(0.0)'], {}), '(v, 0.0)\n', (44510, 44518), True, 'import numpy as np\n'), ((44557, 44569), 'numpy.dot', 'np.dot', (['v', 'v'], {}), '(v, v)\n', (44563, 44569), True, 'import numpy as np\n'), ((44612, 44624), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (44618, 44624), True, 'import numpy as np\n'), ((46237, 46283), 'numpy.array', 'np.array', (['[A_ij[1, 2], A_ij[2, 0], A_ij[0, 1]]'], {}), '([A_ij[1, 2], A_ij[2, 0], A_ij[0, 1]])\n', (46245, 46283), True, 'import numpy as np\n'), ((46302, 46320), 'numpy.trace', 'np.trace', (['Sigma_px'], {}), '(Sigma_px)\n', (46310, 46320), True, 'import numpy as np\n'), ((46348, 46364), 'numpy.empty', 'np.empty', (['(4, 4)'], {}), '((4, 4))\n', (46356, 46364), True, 'import numpy as np\n'), ((46508, 46525), 'numpy.linalg.eigh', 'np.linalg.eigh', (['Q'], {}), '(Q)\n', (46522, 46525), True, 'import numpy as np\n'), ((46557, 46568), 'numpy.empty', 'np.empty', (['(6)'], {}), '(6)\n', (46565, 46568), True, 'import numpy as np\n'), ((48241, 48277), 
'numpy.einsum', 'np.einsum', (['"""ij,ik->jk"""', 'quats', 'quats'], {}), "('ij,ik->jk', quats, quats)\n", (48250, 48277), True, 'import numpy as np\n'), ((48858, 48893), 'numpy.sign', 'np.sign', (['avg_quat[avg_quat != 0][0]'], {}), '(avg_quat[avg_quat != 0][0])\n', (48865, 48893), True, 'import numpy as np\n'), ((49432, 49500), 'os.path.join', 'op.join', (['subjects_dir', 'subject', '"""mri"""', '"""transforms"""', '"""talairach.xfm"""'], {}), "(subjects_dir, subject, 'mri', 'transforms', 'talairach.xfm')\n", (49439, 49500), True, 'import os.path as op\n'), ((50872, 50898), 'numpy.array', 'np.array', (['xfm'], {'dtype': 'float'}), '(xfm, dtype=float)\n', (50880, 50898), True, 'import numpy as np\n'), ((51424, 51444), 'numpy.empty', 'np.empty', (['quat.shape'], {}), '(quat.shape)\n', (51432, 51444), True, 'import numpy as np\n'), ((51529, 51604), 'numpy.arctan2', 'np.arctan2', (['(2 * (w * x + y * z))', '(1 - 2 * (x * x + y * y))'], {'out': 'euler[..., 0]'}), '(2 * (w * x + y * z), 1 - 2 * (x * x + y * y), out=euler[..., 0])\n', (51539, 51604), True, 'import numpy as np\n'), ((51609, 51658), 'numpy.arcsin', 'np.arcsin', (['(2 * (w * y - x * z))'], {'out': 'euler[..., 1]'}), '(2 * (w * y - x * z), out=euler[..., 1])\n', (51618, 51658), True, 'import numpy as np\n'), ((51663, 51738), 'numpy.arctan2', 'np.arctan2', (['(2 * (w * z + x * y))', '(1 - 2 * (y * y + z * z))'], {'out': 'euler[..., 2]'}), '(2 * (w * z + x * y), 1 - 2 * (y * y + z * z), out=euler[..., 2])\n', (51673, 51738), True, 'import numpy as np\n'), ((51796, 51817), 'numpy.empty', 'np.empty', (['euler.shape'], {}), '(euler.shape)\n', (51804, 51817), True, 'import numpy as np\n'), ((52079, 52131), 'numpy.sign', 'np.sign', (['(cphi * ctheta * cpsi + sphi * stheta * spsi)'], {}), '(cphi * ctheta * cpsi + sphi * stheta * spsi)\n', (52086, 52131), True, 'import numpy as np\n'), ((52139, 52156), 'numpy.isscalar', 'np.isscalar', (['mult'], {}), '(mult)\n', (52150, 52156), True, 'import numpy as np\n'), 
((61347, 61426), 'dipy.align.imaffine.AffineMap', 'AffineMap', (['reg_affine', 'static.shape', 'static_affine', 'moving.shape', 'moving_affine'], {}), '(reg_affine, static.shape, static_affine, moving.shape, moving_affine)\n', (61356, 61426), False, 'from dipy.align.imaffine import AffineMap\n'), ((61858, 61895), 'nibabel.spatialimages.SpatialImage', 'SpatialImage', (['reg_data', 'static_affine'], {}), '(reg_data, static_affine)\n', (61870, 61895), False, 'from nibabel.spatialimages import SpatialImage\n'), ((5969, 5983), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (5977, 5983), False, 'from copy import deepcopy\n'), ((7009, 7054), 'os.path.join', 'op.join', (['subjects_dir', 'subject', '"""*-trans.fif"""'], {}), "(subjects_dir, subject, '*-trans.fif')\n", (7016, 7054), True, 'import os.path as op\n'), ((10143, 10172), 'numpy.linalg.norm', 'np.linalg.norm', (['target_z_axis'], {}), '(target_z_axis)\n', (10157, 10172), True, 'import numpy as np\n'), ((16668, 16711), 'numpy.dot', 'np.dot', (["t_second['trans']", "t_first['trans']"], {}), "(t_second['trans'], t_first['trans'])\n", (16674, 16711), True, 'import numpy as np\n'), ((18578, 18607), 'numpy.linalg.inv', 'np.linalg.inv', (["trans['trans']"], {}), "(trans['trans'])\n", (18591, 18607), True, 'import numpy as np\n'), ((19193, 19207), 'copy.deepcopy', 'deepcopy', (['surf'], {}), '(surf)\n', (19201, 19207), False, 'from copy import deepcopy\n'), ((20933, 20954), 'numpy.linalg.norm', 'np.linalg.norm', (['right'], {}), '(right)\n', (20947, 20954), True, 'import numpy as np\n'), ((21084, 21108), 'numpy.linalg.norm', 'np.linalg.norm', (['anterior'], {}), '(anterior)\n', (21098, 21108), True, 'import numpy as np\n'), ((22910, 22937), 'numpy.sum', 'np.sum', (['(cart * cart)'], {'axis': '(1)'}), '(cart * cart, axis=1)\n', (22916, 22937), True, 'import numpy as np\n'), ((23713, 23734), 'numpy.cos', 'np.cos', (['sph_pts[:, 2]'], {}), '(sph_pts[:, 2])\n', (23719, 23734), True, 'import numpy as np\n'), ((23760, 
23781), 'numpy.sin', 'np.sin', (['sph_pts[:, 2]'], {}), '(sph_pts[:, 2])\n', (23766, 23781), True, 'import numpy as np\n'), ((23808, 23829), 'numpy.cos', 'np.cos', (['sph_pts[:, 1]'], {}), '(sph_pts[:, 1])\n', (23814, 23829), True, 'import numpy as np\n'), ((23856, 23877), 'numpy.sin', 'np.sin', (['sph_pts[:, 1]'], {}), '(sph_pts[:, 1])\n', (23862, 23877), True, 'import numpy as np\n'), ((25445, 25455), 'numpy.cos', 'np.cos', (['az'], {}), '(az)\n', (25451, 25455), True, 'import numpy as np\n'), ((25457, 25467), 'numpy.sin', 'np.sin', (['az'], {}), '(az)\n', (25463, 25467), True, 'import numpy as np\n'), ((25485, 25496), 'numpy.cos', 'np.cos', (['pol'], {}), '(pol)\n', (25491, 25496), True, 'import numpy as np\n'), ((25498, 25509), 'numpy.sin', 'np.sin', (['pol'], {}), '(pol)\n', (25504, 25509), True, 'import numpy as np\n'), ((26675, 26686), 'numpy.real', 'np.real', (['sh'], {}), '(sh)\n', (26682, 26686), True, 'import numpy as np\n'), ((31529, 31547), 'numpy.zeros_like', 'np.zeros_like', (['pts'], {}), '(pts)\n', (31542, 31547), True, 'import numpy as np\n'), ((32364, 32377), 'numpy.log', 'np.log', (['valid'], {}), '(valid)\n', (32370, 32377), True, 'import numpy as np\n'), ((35132, 35143), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (35140, 35143), True, 'import numpy as np\n'), ((37842, 37873), 'numpy.array', 'np.array', (['[source, destination]'], {}), '([source, destination])\n', (37850, 37873), True, 'import numpy as np\n'), ((39317, 39339), 'numpy.deg2rad', 'np.deg2rad', (['topo[:, 0]'], {}), '(topo[:, 0])\n', (39327, 39339), True, 'import numpy as np\n'), ((40898, 40921), 'numpy.reshape', 'np.reshape', (['rot', '(3, 3)'], {}), '(rot, (3, 3))\n', (40908, 40921), True, 'import numpy as np\n'), ((40930, 40947), 'numpy.abs', 'np.abs', (['(det - 1.0)'], {}), '(det - 1.0)\n', (40936, 40947), True, 'import numpy as np\n'), ((43093, 43207), 'numpy.maximum', 'np.maximum', (['(1.0 - quat[..., 0] * quat[..., 0] - quat[..., 1] * quat[..., 1] - quat[...,\n 2] * 
quat[..., 2])', '(0.0)'], {}), '(1.0 - quat[..., 0] * quat[..., 0] - quat[..., 1] * quat[..., 1] -\n quat[..., 2] * quat[..., 2], 0.0)\n', (43103, 43207), True, 'import numpy as np\n'), ((45854, 45868), 'numpy.dot', 'np.dot', (['p.T', 'x'], {}), '(p.T, x)\n', (45860, 45868), True, 'import numpy as np\n'), ((46075, 46100), 'numpy.dot', 'np.dot', (['p.T', '(weights_ * x)'], {}), '(p.T, weights_ * x)\n', (46081, 46100), True, 'import numpy as np\n'), ((46123, 46143), 'numpy.outer', 'np.outer', (['mu_p', 'mu_x'], {}), '(mu_p, mu_x)\n', (46131, 46143), True, 'import numpy as np\n'), ((46636, 46653), 'numpy.sign', 'np.sign', (['v[0, -1]'], {}), '(v[0, -1])\n', (46643, 46653), True, 'import numpy as np\n'), ((47409, 47432), 'numpy.ones', 'np.ones', (['quats.shape[0]'], {}), '(quats.shape[0])\n', (47416, 47432), True, 'import numpy as np\n'), ((47532, 47543), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (47540, 47543), True, 'import numpy as np\n'), ((51913, 51924), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (51919, 51924), True, 'import numpy as np\n'), ((51926, 51937), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (51932, 51937), True, 'import numpy as np\n'), ((51971, 51984), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (51977, 51984), True, 'import numpy as np\n'), ((51986, 51999), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (51992, 51999), True, 'import numpy as np\n'), ((52031, 52042), 'numpy.cos', 'np.cos', (['psi'], {}), '(psi)\n', (52037, 52042), True, 'import numpy as np\n'), ((52044, 52055), 'numpy.sin', 'np.sin', (['psi'], {}), '(psi)\n', (52050, 52055), True, 'import numpy as np\n'), ((55133, 55175), 'dipy.align.reslice.reslice', 'reslice', (['img', 'img_affine', 'img_zooms', 'zooms'], {}), '(img, img_affine, img_zooms, zooms)\n', (55140, 55175), False, 'from dipy.align.reslice import reslice\n'), ((56581, 56611), 'numpy.testing.suppress_warnings', 'np.testing.suppress_warnings', ([], {}), '()\n', (56609, 56611), True, 'import 
numpy as np\n'), ((61218, 61244), 'numpy.asarray', 'np.asarray', (['moving.dataobj'], {}), '(moving.dataobj)\n', (61228, 61244), True, 'import numpy as np\n'), ((61288, 61314), 'numpy.asarray', 'np.asarray', (['static.dataobj'], {}), '(static.dataobj)\n', (61298, 61314), True, 'import numpy as np\n'), ((3702, 3711), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (3708, 3711), True, 'import numpy as np\n'), ((3734, 3763), 'numpy.asarray', 'np.asarray', (['trans', 'np.float64'], {}), '(trans, np.float64)\n', (3744, 3763), True, 'import numpy as np\n'), ((4035, 4065), 'numpy.printoptions', 'np.printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (4050, 4065), True, 'import numpy as np\n'), ((4850, 4914), 'numpy.allclose', 'np.allclose', (["self['trans']", "other['trans']"], {'rtol': 'rtol', 'atol': 'atol'}), "(self['trans'], other['trans'], rtol=rtol, atol=atol)\n", (4861, 4914), True, 'import numpy as np\n'), ((11032, 11048), 'numpy.linalg.det', 'np.linalg.det', (['r'], {}), '(r)\n', (11045, 11048), True, 'import numpy as np\n'), ((14379, 14395), 'os.path.isfile', 'op.isfile', (['trans'], {}), '(trans)\n', (14388, 14395), True, 'import os.path as op\n'), ((14709, 14729), 'numpy.genfromtxt', 'np.genfromtxt', (['trans'], {}), '(trans)\n', (14722, 14729), True, 'import numpy as np\n'), ((20975, 21007), 'numpy.dot', 'np.dot', (['(nasion - lpa)', 'right_unit'], {}), '(nasion - lpa, right_unit)\n', (20981, 21007), True, 'import numpy as np\n'), ((26712, 26724), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (26719, 26724), True, 'import numpy as np\n'), ((27283, 27295), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (27290, 27295), True, 'import numpy as np\n'), ((27667, 27700), 'scipy.special.sph_harm', 'sph_harm', (['order_', 'degree', 'az', 'pol'], {}), '(order_, degree, az, pol)\n', (27675, 27700), False, 'from scipy.special import sph_harm\n'), ((30396, 30437), 'scipy.spatial.distance.cdist', 'cdist', (['source', 'destination', 
'"""sqeuclidean"""'], {}), "(source, destination, 'sqeuclidean')\n", (30401, 30437), False, 'from scipy.spatial.distance import cdist\n'), ((30841, 30859), 'numpy.eye', 'np.eye', (['L.shape[0]'], {}), '(L.shape[0])\n', (30847, 30859), True, 'import numpy as np\n'), ((30931, 30949), 'scipy.linalg.lstsq', 'linalg.lstsq', (['L', 'Y'], {}), '(L, Y)\n', (30943, 30949), False, 'from scipy import linalg\n'), ((31704, 31733), 'numpy.array_split', 'np.array_split', (['out', 'n_splits'], {}), '(out, n_splits)\n', (31718, 31733), True, 'import numpy as np\n'), ((31773, 31802), 'numpy.array_split', 'np.array_split', (['pts', 'n_splits'], {}), '(pts, n_splits)\n', (31787, 31802), True, 'import numpy as np\n'), ((31982, 32006), 'numpy.dot', 'np.dot', (['L', 'self._weights'], {}), '(L, self._weights)\n', (31988, 32006), True, 'import numpy as np\n'), ((35227, 35296), 'numpy.array', 'np.array', (['[p for p in source if not (p[2] < -1e-06 and p[1] > 1e-06)]'], {}), '([p for p in source if not (p[2] < -1e-06 and p[1] > 1e-06)])\n', (35235, 35296), True, 'import numpy as np\n'), ((35439, 35504), 'numpy.array', 'np.array', (['[p for p in destination if not (p[2] < 0 and p[1] > 0)]'], {}), '([p for p in destination if not (p[2] < 0 and p[1] > 0)])\n', (35447, 35504), True, 'import numpy as np\n'), ((36873, 36913), 'scipy.linalg.lstsq', 'linalg.lstsq', (['src_sph', 'src_rad_az_pol[0]'], {}), '(src_sph, src_rad_az_pol[0])\n', (36885, 36913), False, 'from scipy import linalg\n'), ((36939, 36981), 'scipy.linalg.lstsq', 'linalg.lstsq', (['dest_sph', 'dest_rad_az_pol[0]'], {}), '(dest_sph, dest_rad_az_pol[0])\n', (36951, 36981), False, 'from scipy import linalg\n'), ((37296, 37325), 'numpy.dot', 'np.dot', (['match_sph', 'src_coeffs'], {}), '(match_sph, src_coeffs)\n', (37302, 37325), True, 'import numpy as np\n'), ((37413, 37443), 'numpy.dot', 'np.dot', (['match_sph', 'dest_coeffs'], {}), '(match_sph, dest_coeffs)\n', (37419, 37443), True, 'import numpy as np\n'), ((38877, 38894), 
'numpy.cos', 'np.cos', (['pol[:, 1]'], {}), '(pol[:, 1])\n', (38883, 38894), True, 'import numpy as np\n'), ((38927, 38944), 'numpy.sin', 'np.sin', (['pol[:, 1]'], {}), '(pol[:, 1])\n', (38933, 38944), True, 'import numpy as np\n'), ((39010, 39027), 'numpy.sin', 'np.sin', (['pol[:, 2]'], {}), '(pol[:, 2])\n', (39016, 39027), True, 'import numpy as np\n'), ((39052, 39069), 'numpy.cos', 'np.cos', (['pol[:, 1]'], {}), '(pol[:, 1])\n', (39058, 39069), True, 'import numpy as np\n'), ((39094, 39111), 'numpy.sin', 'np.sin', (['pol[:, 1]'], {}), '(pol[:, 1])\n', (39100, 39111), True, 'import numpy as np\n'), ((41084, 41103), 'numpy.finfo', 'np.finfo', (['rot.dtype'], {}), '(rot.dtype)\n', (41092, 41103), True, 'import numpy as np\n'), ((41121, 41131), 'numpy.sqrt', 'np.sqrt', (['t'], {}), '(t)\n', (41128, 41131), True, 'import numpy as np\n'), ((42920, 42946), 'numpy.linalg.norm', 'np.linalg.norm', (['z'], {'axis': '(-1)'}), '(z, axis=-1)\n', (42934, 42946), True, 'import numpy as np\n'), ((43437, 43459), 'numpy.broadcast', 'np.broadcast', (['one', 'two'], {}), '(one, two)\n', (43449, 43459), True, 'import numpy as np\n'), ((45995, 46016), 'numpy.dot', 'np.dot', (['weights_.T', 'p'], {}), '(weights_.T, p)\n', (46001, 46016), True, 'import numpy as np\n'), ((46035, 46056), 'numpy.dot', 'np.dot', (['weights_.T', 'x'], {}), '(weights_.T, x)\n', (46041, 46056), True, 'import numpy as np\n'), ((46487, 46496), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (46493, 46496), True, 'import numpy as np\n'), ((47157, 47174), 'numpy.dot', 'np.dot', (['rot', 'mu_p'], {}), '(rot, mu_p)\n', (47163, 47174), True, 'import numpy as np\n'), ((48327, 48341), 'scipy.linalg.eigh', 'linalg.eigh', (['A'], {}), '(A)\n', (48338, 48341), False, 'from scipy import linalg\n'), ((54860, 54877), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (54874, 54877), True, 'import numpy as np\n'), ((54880, 54897), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (54894, 54897), True, 
'import numpy as np\n'), ((58069, 58167), 'dipy.align.imaffine.AffineMap', 'AffineMap', (['reg_affine', 'static_zoomed.shape', 'static_affine', 'moving_zoomed.shape', 'moving_affine'], {}), '(reg_affine, static_zoomed.shape, static_affine, moving_zoomed.\n shape, moving_affine)\n', (58078, 58167), False, 'from dipy.align.imaffine import AffineMap\n'), ((10994, 11008), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (11005, 11008), True, 'import numpy as np\n'), ((14270, 14290), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (14280, 14290), True, 'import os.path as op\n'), ((14469, 14487), 'os.path.splitext', 'op.splitext', (['trans'], {}), '(trans)\n', (14480, 14487), True, 'import os.path as op\n'), ((25652, 25671), 'numpy.zeros_like', 'np.zeros_like', (['c_as'], {}), '(c_as)\n', (25665, 25671), True, 'import numpy as np\n'), ((30569, 30598), 'numpy.ones', 'np.ones', (['(source.shape[0], 1)'], {}), '((source.shape[0], 1))\n', (30576, 30598), True, 'import numpy as np\n'), ((30641, 30662), 'numpy.hstack', 'np.hstack', (['[dists, P]'], {}), '([dists, P])\n', (30650, 30662), True, 'import numpy as np\n'), ((30765, 30781), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (30773, 30781), True, 'import numpy as np\n'), ((31830, 31879), 'scipy.spatial.distance.cdist', 'cdist', (['this_pts', 'self._destination', '"""sqeuclidean"""'], {}), "(this_pts, self._destination, 'sqeuclidean')\n", (31835, 31879), False, 'from scipy.spatial.distance import cdist\n'), ((41324, 41363), 'numpy.sqrt', 'np.sqrt', (['(1.0 + rot[0] - rot[4] - rot[8])'], {}), '(1.0 + rot[0] - rot[4] - rot[8])\n', (41331, 41363), True, 'import numpy as np\n'), ((44709, 44723), 'numpy.dot', 'np.dot', (['vx', 'vx'], {}), '(vx, vx)\n', (44715, 44723), True, 'import numpy as np\n'), ((47015, 47028), 'numpy.sum', 'np.sum', (['dev_x'], {}), '(dev_x)\n', (47021, 47028), True, 'import numpy as np\n'), ((47031, 47044), 'numpy.sum', 'np.sum', (['dev_p'], {}), '(dev_p)\n', 
(47037, 47044), True, 'import numpy as np\n'), ((58401, 58420), 'dipy.align.metrics.CCMetric', 'metrics.CCMetric', (['(3)'], {}), '(3)\n', (58417, 58420), False, 'from dipy.align import affine_registration, center_of_mass, translation, rigid, affine, imwarp, metrics\n'), ((58832, 59064), 'dipy.align.affine_registration', 'affine_registration', (['moving_zoomed', 'static_zoomed', 'moving_affine', 'static_affine'], {'nbins': '(32)', 'metric': '"""MI"""', 'pipeline': 'pipeline_options[step]', 'level_iters': 'niter[step]', 'sigmas': 'sigmas', 'factors': 'factors', 'starting_affine': 'reg_affine'}), "(moving_zoomed, static_zoomed, moving_affine,\n static_affine, nbins=32, metric='MI', pipeline=pipeline_options[step],\n level_iters=niter[step], sigmas=sigmas, factors=factors,\n starting_affine=reg_affine)\n", (58851, 59064), False, 'from dipy.align import affine_registration, center_of_mass, translation, rigid, affine, imwarp, metrics\n'), ((59252, 59285), 'numpy.linalg.norm', 'np.linalg.norm', (['reg_affine[:3, 3]'], {}), '(reg_affine[:3, 3])\n', (59266, 59285), True, 'import numpy as np\n'), ((61748, 61776), 'numpy.linalg.inv', 'np.linalg.inv', (['static_affine'], {}), '(static_affine)\n', (61761, 61776), True, 'import numpy as np\n'), ((31915, 31943), 'numpy.ones', 'np.ones', (['(dists.shape[0], 1)'], {}), '((dists.shape[0], 1))\n', (31922, 31943), True, 'import numpy as np\n'), ((41535, 41574), 'numpy.sqrt', 'np.sqrt', (['(1.0 - rot[0] + rot[4] - rot[8])'], {}), '(1.0 - rot[0] + rot[4] - rot[8])\n', (41542, 41574), True, 'import numpy as np\n'), ((41729, 41768), 'numpy.sqrt', 'np.sqrt', (['(1.0 - rot[0] - rot[4] + rot[8])'], {}), '(1.0 - rot[0] - rot[4] + rot[8])\n', (41736, 41768), True, 'import numpy as np\n'), ((27256, 27270), 'numpy.sign', 'np.sign', (['order'], {}), '(order)\n', (27263, 27270), True, 'import numpy as np\n'), ((30703, 30719), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (30711, 30719), True, 'import numpy as np\n'), ((35723, 35756), 
'numpy.array_str', 'np.array_str', (['src_center', 'None', '(3)'], {}), '(src_center, None, 3)\n', (35735, 35756), True, 'import numpy as np\n'), ((35785, 35819), 'numpy.array_str', 'np.array_str', (['dest_center', 'None', '(3)'], {}), '(dest_center, None, 3)\n', (35797, 35819), True, 'import numpy as np\n'), ((59363, 59374), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (59371, 59374), True, 'import numpy as np\n'), ((53000, 53026), 'numpy.array', 'np.array', (['val'], {'dtype': 'float'}), '(val, dtype=float)\n', (53008, 53026), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Unit tests for utilities."""
import unittest
from cclib.parser import utils
import numpy
import scipy.spatial.transform
class FloatTest(unittest.TestCase):
    """Tests for the string-to-float conversion helper ``utils.float``."""

    def test_float_basic(self):
        """Plain decimal strings convert to the matching float value."""
        for text, expected in (("0.0", 0.0), ("1.0", 1.0), ("-1.0", -1.0)):
            self.assertEqual(utils.float(text), expected)

    def test_float_numeric_format(self):
        """Scientific notation with either E or Fortran-style D exponents is handled."""
        for text in ("1.2345E+02", "1.2345D+02"):
            self.assertEqual(utils.float(text), 123.45)

    def test_float_stars(self):
        """Star-filled (overflowed) fields convert to NaN."""
        for text in ("*", "*****"):
            self.assertTrue(numpy.isnan(utils.float(text)))
class ConvertorTest(unittest.TestCase):
    """Tests for unit conversion via ``utils.convertor``."""

    def test_convertor(self):
        """8 eV converts to the expected wavenumber (checked to 3 decimals)."""
        wavenumber = utils.convertor(8.0, 'eV', 'wavenumber')
        self.assertEqual(f"{wavenumber:.3f}", "64524.354")
class GetRotationTest(unittest.TestCase):
    """Tests for ``utils.get_rotation``, which recovers the rotation aligning two point sets."""

    # Absolute tolerance for comparing rotation matrices.
    delta = 1e-14

    def setUp(self):
        """Build a reference rotation/translation and two matching coordinate sets."""
        self.r = scipy.spatial.transform.Rotation.from_euler('xyz', [15, 25, 35], degrees=True)
        self.t = numpy.array([-1, 0, 2])
        self.a = numpy.array(
            [[1.0, 1.0, 1.0], [0.0, 1.0, 2.0], [0.0, 0.0, 0.0], [0.0, 0.0, 4.0]]
        )
        self.b = self.r.apply(self.a + self.t)

    def _as_matrix(self, rotation):
        """Return the rotation matrix, supporting both old and new scipy APIs."""
        # Rotation.as_dcm was renamed to as_matrix in scipy 1.4.0 and removed in scipy 1.6.0.
        if hasattr(rotation, "as_matrix"):
            return rotation.as_matrix()
        return rotation.as_dcm()

    def test_default(self):
        """The recovered rotation matches the reference rotation."""
        recovered = utils.get_rotation(self.a, self.b)
        numpy.testing.assert_allclose(
            self._as_matrix(self.r), self._as_matrix(recovered), atol=self.delta
        )

    def test_two_atoms(self):
        """With only two points, the residual of the recovered rotation is a pure translation."""
        a2, b2 = self.a[:2], self.b[:2]
        rotated_diff = self.r.apply(a2) - utils.get_rotation(a2, b2).apply(a2)
        # If only a translation remains, every row of the residual is identical.
        numpy.testing.assert_allclose(rotated_diff[0], rotated_diff[1], atol=self.delta)

    def test_one_atom(self):
        """With a single point, the recovered rotation is the identity."""
        a1, b1 = self.a[:1], self.b[:1]
        numpy.testing.assert_allclose(
            numpy.eye(3), self._as_matrix(utils.get_rotation(a1, b1)), atol=self.delta
        )
class PeriodicTableTest(unittest.TestCase):
    """Tests for ``utils.PeriodicTable`` symbol/number lookups."""

    def setUp(self):
        self.table = utils.PeriodicTable()

    def test_periodictable(self):
        """Element symbols and atomic numbers map correctly in both directions."""
        self.assertEqual(self.table.element[6], 'C')
        self.assertEqual(self.table.number['C'], 6)
        self.assertEqual(self.table.element[44], 'Ru')
        self.assertEqual(self.table.number['Au'], 79)
class WidthSplitterTest(unittest.TestCase):
    """Tests for ``utils.WidthSplitter``, which tokenizes fixed-width lines."""

    # Column widths shared by both tests below.
    widths = (4, 3, 5, 6, 10, 10, 10, 10, 10, 10)

    def test_default(self):
        """By default the splitter drops empty trailing fields."""
        splitter = utils.WidthSplitter(self.widths)
        line_full = "  60  H 10  s          0.14639   0.00000   0.00000  -0.00000  -0.00000   0.00000"
        line_truncated = "   1  C 1   s         -0.00000  -0.00000   0.00000"
        self.assertEqual(
            splitter.split(line_full),
            ['60', 'H', '10', 's', '0.14639', '0.00000', '0.00000', '-0.00000', '-0.00000', '0.00000'],
        )
        self.assertEqual(
            splitter.split(line_truncated),
            ['1', 'C', '1', 's', '-0.00000', '-0.00000', '0.00000'],
        )

    def test_no_truncation(self):
        """With ``truncate=False``, empty trailing fields are preserved."""
        splitter = utils.WidthSplitter(self.widths)
        line = "   1  C 1   s         -0.00000  -0.00000   0.00000"
        self.assertEqual(
            splitter.split(line, truncate=False),
            ['1', 'C', '1', 's', '-0.00000', '-0.00000', '0.00000', '', '', ''],
        )
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"cclib.parser.utils.convertor",
"numpy.eye",
"cclib.parser.utils.get_rotation",
"cclib.parser.utils.WidthSplitter",
"cclib.parser.utils.float",
"numpy.array",
"cclib.parser.utils.PeriodicTable",
"numpy.testing.assert_allclose"
] | [((4533, 4548), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4546, 4548), False, 'import unittest\n'), ((1390, 1413), 'numpy.array', 'numpy.array', (['[-1, 0, 2]'], {}), '([-1, 0, 2])\n', (1401, 1413), False, 'import numpy\n'), ((1431, 1517), 'numpy.array', 'numpy.array', (['[[1.0, 1.0, 1.0], [0.0, 1.0, 2.0], [0.0, 0.0, 0.0], [0.0, 0.0, 4.0]]'], {}), '([[1.0, 1.0, 1.0], [0.0, 1.0, 2.0], [0.0, 0.0, 0.0], [0.0, 0.0, \n 4.0]])\n', (1442, 1517), False, 'import numpy\n'), ((1722, 1756), 'cclib.parser.utils.get_rotation', 'utils.get_rotation', (['self.a', 'self.b'], {}), '(self.a, self.b)\n', (1740, 1756), False, 'from cclib.parser import utils\n'), ((2354, 2439), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['rotated_diff[0]', 'rotated_diff[1]'], {'atol': 'self.delta'}), '(rotated_diff[0], rotated_diff[1], atol=self.delta\n )\n', (2383, 2439), False, 'import numpy\n'), ((2930, 2951), 'cclib.parser.utils.PeriodicTable', 'utils.PeriodicTable', ([], {}), '()\n', (2949, 2951), False, 'from cclib.parser import utils\n'), ((3358, 3415), 'cclib.parser.utils.WidthSplitter', 'utils.WidthSplitter', (['(4, 3, 5, 6, 10, 10, 10, 10, 10, 10)'], {}), '((4, 3, 5, 6, 10, 10, 10, 10, 10, 10))\n', (3377, 3415), False, 'from cclib.parser import utils\n'), ((4140, 4197), 'cclib.parser.utils.WidthSplitter', 'utils.WidthSplitter', (['(4, 3, 5, 6, 10, 10, 10, 10, 10, 10)'], {}), '((4, 3, 5, 6, 10, 10, 10, 10, 10, 10))\n', (4159, 4197), False, 'from cclib.parser import utils\n'), ((478, 496), 'cclib.parser.utils.float', 'utils.float', (['"""0.0"""'], {}), "('0.0')\n", (489, 496), False, 'from cclib.parser import utils\n'), ((528, 546), 'cclib.parser.utils.float', 'utils.float', (['"""1.0"""'], {}), "('1.0')\n", (539, 546), False, 'from cclib.parser import utils\n'), ((578, 597), 'cclib.parser.utils.float', 'utils.float', (['"""-1.0"""'], {}), "('-1.0')\n", (589, 597), False, 'from cclib.parser import utils\n'), ((735, 760), 'cclib.parser.utils.float', 
'utils.float', (['"""1.2345E+02"""'], {}), "('1.2345E+02')\n", (746, 760), False, 'from cclib.parser import utils\n'), ((795, 820), 'cclib.parser.utils.float', 'utils.float', (['"""1.2345D+02"""'], {}), "('1.2345D+02')\n", (806, 820), False, 'from cclib.parser import utils\n'), ((953, 969), 'cclib.parser.utils.float', 'utils.float', (['"""*"""'], {}), "('*')\n", (964, 969), False, 'from cclib.parser import utils\n'), ((1008, 1028), 'cclib.parser.utils.float', 'utils.float', (['"""*****"""'], {}), "('*****')\n", (1019, 1028), False, 'from cclib.parser import utils\n'), ((2650, 2662), 'numpy.eye', 'numpy.eye', (['(3)'], {}), '(3)\n', (2659, 2662), False, 'import numpy\n'), ((2777, 2789), 'numpy.eye', 'numpy.eye', (['(3)'], {}), '(3)\n', (2786, 2789), False, 'import numpy\n'), ((1132, 1172), 'cclib.parser.utils.convertor', 'utils.convertor', (['(8.0)', '"""eV"""', '"""wavenumber"""'], {}), "(8.0, 'eV', 'wavenumber')\n", (1147, 1172), False, 'from cclib.parser import utils\n'), ((2264, 2290), 'cclib.parser.utils.get_rotation', 'utils.get_rotation', (['a2', 'b2'], {}), '(a2, b2)\n', (2282, 2290), False, 'from cclib.parser import utils\n'), ((2664, 2690), 'cclib.parser.utils.get_rotation', 'utils.get_rotation', (['a1', 'b1'], {}), '(a1, b1)\n', (2682, 2690), False, 'from cclib.parser import utils\n'), ((2791, 2817), 'cclib.parser.utils.get_rotation', 'utils.get_rotation', (['a1', 'b1'], {}), '(a1, b1)\n', (2809, 2817), False, 'from cclib.parser import utils\n')] |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane.init` module.
"""
# pylint: disable=protected-access,cell-var-from-loop
import pytest
import numpy as np
import pennylane as qml
#######################################
# Functions and their signatures
# Functions returning a single parameter array
# function name, kwargs and target shape
# Each case: (initialization function, keyword arguments, expected parameter-array shape).
INIT_KWARGS_SHAPES = [
    (qml.init.random_layers_normal, {'n_layers': 2, 'n_wires': 3, 'n_rots': 10, 'mean': 0, 'std': 1}, (2, 10)),
    (qml.init.random_layers_normal, {'n_layers': 2, 'n_wires': 1, 'n_rots': 10, 'mean': 0, 'std': 1}, (2, 10)),
    (qml.init.random_layers_normal, {'n_layers': 2, 'n_wires': 3, 'n_rots': None, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.strong_ent_layers_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 3, 3)),
    (qml.init.strong_ent_layers_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 1, 3)),
    (qml.init.cvqnn_layers_theta_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.cvqnn_layers_theta_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 0)),
    (qml.init.cvqnn_layers_phi_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.cvqnn_layers_phi_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 0)),
    (qml.init.cvqnn_layers_varphi_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.cvqnn_layers_varphi_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 1)),
    (qml.init.cvqnn_layers_r_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.cvqnn_layers_r_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 1)),
    (qml.init.cvqnn_layers_phi_r_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.cvqnn_layers_phi_r_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 1)),
    (qml.init.cvqnn_layers_a_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.cvqnn_layers_a_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 1)),
    (qml.init.cvqnn_layers_phi_a_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.cvqnn_layers_phi_a_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 1)),
    (qml.init.cvqnn_layers_kappa_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.cvqnn_layers_kappa_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 1)),
    (qml.init.interferometer_theta_normal, {'n_wires': 3, 'mean': 0, 'std': 1}, (3,)),
    (qml.init.interferometer_theta_normal, {'n_wires': 1, 'mean': 0, 'std': 1}, (0,)),
    (qml.init.interferometer_phi_normal, {'n_wires': 3, 'mean': 0, 'std': 1}, (3,)),
    (qml.init.interferometer_phi_normal, {'n_wires': 1, 'mean': 0, 'std': 1}, (0,)),
    (qml.init.interferometer_varphi_normal, {'n_wires': 3, 'mean': 0, 'std': 1}, (3,)),
    (qml.init.interferometer_varphi_normal, {'n_wires': 1, 'mean': 0, 'std': 1}, (1,)),
    (qml.init.random_layers_uniform, {'n_layers': 2, 'n_wires': 3, 'n_rots': 10, 'low': 0, 'high': 1}, (2, 10)),
    (qml.init.random_layers_uniform, {'n_layers': 2, 'n_wires': 3, 'n_rots': None, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.random_layers_uniform, {'n_layers': 2, 'n_wires': 1, 'n_rots': None, 'low': 0, 'high': 1}, (2, 1)),
    (qml.init.random_layers_uniform, {'n_layers': 2, 'n_wires': 1, 'n_rots': 10, 'low': 0, 'high': 1}, (2, 10)),
    (qml.init.strong_ent_layers_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 3, 3)),
    (qml.init.strong_ent_layers_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 1, 3)),
    (qml.init.cvqnn_layers_theta_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.cvqnn_layers_theta_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 0)),
    (qml.init.cvqnn_layers_phi_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.cvqnn_layers_phi_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 0)),
    (qml.init.cvqnn_layers_varphi_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.cvqnn_layers_varphi_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 1)),
    (qml.init.cvqnn_layers_r_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.cvqnn_layers_r_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 1)),
    (qml.init.cvqnn_layers_phi_r_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.cvqnn_layers_phi_r_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 1)),
    (qml.init.cvqnn_layers_a_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.cvqnn_layers_a_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 1)),
    (qml.init.cvqnn_layers_phi_a_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.cvqnn_layers_phi_a_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 1)),
    (qml.init.cvqnn_layers_kappa_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.cvqnn_layers_kappa_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 1)),
    (qml.init.interferometer_theta_uniform, {'n_wires': 3, 'low': 0, 'high': 1}, (3,)),
    (qml.init.interferometer_theta_uniform, {'n_wires': 1, 'low': 0, 'high': 1}, (0,)),
    (qml.init.interferometer_phi_uniform, {'n_wires': 3, 'low': 0, 'high': 1}, (3,)),
    (qml.init.interferometer_phi_uniform, {'n_wires': 1, 'low': 0, 'high': 1}, (0,)),
    (qml.init.interferometer_varphi_uniform, {'n_wires': 3, 'low': 0, 'high': 1}, (3,)),
    (qml.init.interferometer_varphi_uniform, {'n_wires': 1, 'low': 0, 'high': 1}, (1,)),
    (qml.init.qaoa_embedding_normal, {'n_layers': 2, 'n_wires': 3, 'mean': 0, 'std': 1}, (2, 6)),
    (qml.init.qaoa_embedding_uniform, {'n_layers': 2, 'n_wires': 3, 'low': 0, 'high': 1}, (2, 6)),
    (qml.init.qaoa_embedding_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 1)),
    (qml.init.qaoa_embedding_uniform, {'n_layers': 2, 'n_wires': 2, 'low': 0, 'high': 1}, (2, 3)),
    (qml.init.qaoa_embedding_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 1)),
    (qml.init.qaoa_embedding_normal, {'n_layers': 2, 'n_wires': 2, 'mean': 0, 'std': 1}, (2, 3)),
    (qml.init.simplified_two_design_initial_layer_uniform, {'n_wires': 1, 'low': 0, 'high': 1}, (1,)),
    (qml.init.simplified_two_design_initial_layer_uniform, {'n_wires': 3, 'low': 0, 'high': 1}, (3,)),
    (qml.init.simplified_two_design_initial_layer_normal, {'n_wires': 1, 'mean': 0, 'std': 1}, (1,)),
    (qml.init.simplified_two_design_initial_layer_normal, {'n_wires': 3, 'mean': 0, 'std': 1}, (3,)),
    (qml.init.simplified_two_design_weights_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (0,)),
    (qml.init.simplified_two_design_weights_uniform, {'n_layers': 2, 'n_wires': 2, 'low': 0, 'high': 1}, (2, 1, 2)),
    (qml.init.simplified_two_design_weights_uniform, {'n_layers': 2, 'n_wires': 4, 'low': 0, 'high': 1}, (2, 3, 2)),
    (qml.init.simplified_two_design_weights_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (0,)),
    (qml.init.simplified_two_design_weights_normal, {'n_layers': 2, 'n_wires': 2, 'mean': 0, 'std': 1}, (2, 1, 2)),
    (qml.init.simplified_two_design_weights_normal, {'n_layers': 2, 'n_wires': 4, 'mean': 0, 'std': 1}, (2, 3, 2)),
    (qml.init.basic_entangler_layers_normal, {'n_layers': 2, 'n_wires': 1, 'mean': 0, 'std': 1}, (2, 1)),
    (qml.init.basic_entangler_layers_normal, {'n_layers': 2, 'n_wires': 2, 'mean': 0, 'std': 1}, (2, 2)),
    (qml.init.basic_entangler_layers_uniform, {'n_layers': 2, 'n_wires': 1, 'low': 0, 'high': 1}, (2, 1)),
    (qml.init.basic_entangler_layers_uniform, {'n_layers': 2, 'n_wires': 2, 'low': 0, 'high': 1}, (2, 2)),
]
# Functions that return a list of parameter arrays, with the expected shape of each.
INITALL_KWARGS_SHAPES = [
    (qml.init.cvqnn_layers_all, {'n_layers': 2, 'n_wires': 3}, [(2, 3)] * 11),
    (qml.init.interferometer_all, {'n_wires': 3}, [(3,)] * 3),
]
# The same cases without the target shapes.
INIT_KWARGS = [(fn, kwargs) for fn, kwargs, _ in INIT_KWARGS_SHAPES]
#################
class TestInit:
    """Tests the initialization functions from the ``init`` module."""

    @pytest.mark.parametrize("init, sgntr, shp", INIT_KWARGS_SHAPES)
    def test_shape(self, init, sgntr, shp, seed):
        """Confirm that initialization functions
        return an array with the correct shape."""
        s = {**sgntr, 'seed': seed}
        p = init(**s)
        assert p.shape == shp

    @pytest.mark.parametrize("init, sgntr, shp", INITALL_KWARGS_SHAPES)
    def test_all_shape(self, init, sgntr, shp, seed):
        """Confirm that ``all`` initialization functions
        return arrays with the correct shapes."""
        s = {**sgntr, 'seed': seed}
        p = init(**s)
        shapes = [p_.shape for p_ in p]
        assert shapes == shp

    @pytest.mark.parametrize("init, sgntr", INIT_KWARGS)
    def test_same_output_for_same_seed(self, init, sgntr, seed, tol):
        """Confirm that initialization functions return a deterministic output
        for a fixed seed."""
        # exclude case of empty parameter list
        if len(init(**sgntr).flatten()) == 0:
            pytest.skip("test is skipped for empty parameter array")
        s = {**sgntr, 'seed': seed}
        p1 = init(**s)
        p2 = init(**s)
        assert np.allclose(p1, p2, atol=tol)

    @pytest.mark.parametrize("init, sgntr", INIT_KWARGS)
    def test_diff_output_for_diff_seed(self, init, sgntr, seed, tol):
        """Confirm that initialization function returns a different output for
        different seeds."""
        # exclude case of empty parameter list
        if len(init(**sgntr).flatten()) == 0:
            pytest.skip("test is skipped for empty parameter array")
        s = {**sgntr, 'seed': seed}
        p1 = init(**s)
        s = {**s, 'seed': seed + 1}
        p2 = init(**s)
        if p1.shape != (0,):
            assert not np.allclose(p1, p2, atol=tol)

    @pytest.mark.parametrize("init, sgntr", INIT_KWARGS)
    def test_interval(self, init, sgntr, seed, tol):
        """Test that sampled parameters lie in correct interval."""
        # exclude case of empty parameter list
        if len(init(**sgntr).flatten()) == 0:
            pytest.skip("test is skipped for empty parameter array")
        s = {**sgntr, 'seed': seed}
        # Case A: uniformly distributed parameters; with low == high every
        # sample must equal that common value.
        if 'low' in s.keys() and 'high' in s.keys():
            s['low'] = 1
            s['high'] = 1
            p = init(**s)
            p_mean = np.mean(p)
            assert np.isclose(p_mean, 1, atol=tol)
        # Case B: normally distributed parameters; with zero std every
        # sample must equal the mean.
        if 'mean' in s.keys() and 'std' in s.keys():
            s['mean'] = 1
            s['std'] = 0
            p = init(**s)
            p_mean = np.mean(p)
            assert np.isclose(p_mean, 1, atol=tol)

    @pytest.mark.parametrize("init, sgntr", INIT_KWARGS)
    def test_zero_wires(self, init, sgntr):
        """Test that edge case of zero wires returns empty parameter array."""
        # Copy the signature before overriding 'n_wires': the parametrized
        # dicts are shared module-level objects, and mutating them in place
        # would leak n_wires=0 into every test that runs afterwards.
        s = dict(sgntr)
        if "n_wires" in s:
            s["n_wires"] = 0
        p = init(**s)
        assert p.flatten().shape == (0,)
| [
"numpy.allclose",
"pytest.skip",
"numpy.isclose",
"numpy.mean",
"pytest.mark.parametrize"
] | [((13842, 13905), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init, sgntr, shp"""', 'INIT_KWARGS_SHAPES'], {}), "('init, sgntr, shp', INIT_KWARGS_SHAPES)\n", (13865, 13905), False, 'import pytest\n'), ((14151, 14217), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init, sgntr, shp"""', 'INITALL_KWARGS_SHAPES'], {}), "('init, sgntr, shp', INITALL_KWARGS_SHAPES)\n", (14174, 14217), False, 'import pytest\n'), ((14515, 14566), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init, sgntr"""', 'INIT_KWARGS'], {}), "('init, sgntr', INIT_KWARGS)\n", (14538, 14566), False, 'import pytest\n'), ((15042, 15093), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init, sgntr"""', 'INIT_KWARGS'], {}), "('init, sgntr', INIT_KWARGS)\n", (15065, 15093), False, 'import pytest\n'), ((15642, 15693), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init, sgntr"""', 'INIT_KWARGS'], {}), "('init, sgntr', INIT_KWARGS)\n", (15665, 15693), False, 'import pytest\n'), ((16539, 16590), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init, sgntr"""', 'INIT_KWARGS'], {}), "('init, sgntr', INIT_KWARGS)\n", (16562, 16590), False, 'import pytest\n'), ((15006, 15035), 'numpy.allclose', 'np.allclose', (['p1', 'p2'], {'atol': 'tol'}), '(p1, p2, atol=tol)\n', (15017, 15035), True, 'import numpy as np\n'), ((16475, 16485), 'numpy.mean', 'np.mean', (['p'], {}), '(p)\n', (16482, 16485), True, 'import numpy as np\n'), ((16501, 16532), 'numpy.isclose', 'np.isclose', (['p_mean', '(1)'], {'atol': 'tol'}), '(p_mean, 1, atol=tol)\n', (16511, 16532), True, 'import numpy as np\n'), ((14851, 14907), 'pytest.skip', 'pytest.skip', (['"""test is skipped for empty parameter array"""'], {}), "('test is skipped for empty parameter array')\n", (14862, 14907), False, 'import pytest\n'), ((15377, 15433), 'pytest.skip', 'pytest.skip', (['"""test is skipped for empty parameter array"""'], {}), "('test is skipped for empty parameter array')\n", 
(15388, 15433), False, 'import pytest\n'), ((15921, 15977), 'pytest.skip', 'pytest.skip', (['"""test is skipped for empty parameter array"""'], {}), "('test is skipped for empty parameter array')\n", (15932, 15977), False, 'import pytest\n'), ((16218, 16228), 'numpy.mean', 'np.mean', (['p'], {}), '(p)\n', (16225, 16228), True, 'import numpy as np\n'), ((16248, 16279), 'numpy.isclose', 'np.isclose', (['p_mean', '(1)'], {'atol': 'tol'}), '(p_mean, 1, atol=tol)\n', (16258, 16279), True, 'import numpy as np\n'), ((15606, 15635), 'numpy.allclose', 'np.allclose', (['p1', 'p2'], {'atol': 'tol'}), '(p1, p2, atol=tol)\n', (15617, 15635), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# classify.py: make classification of emotion given input features (in npy files)
# dataset: JSET v1.1
# author: <NAME> (<EMAIL>)
# Changelong
# 20210420: initial commit
# 2021042512: change to dense
# 20210430: use the real text-independent (TI) split
import numpy as np
import tensorflow as tf
tf.random.set_seed(221)
import random
random.seed(221)
np.random.seed(221)
# Import features (emo_large HSF .npy dumps) and labels for the
# speaker-text-independent (STI) split of the JTES corpus.
path_base = '/home/bagus/research/2021/jtes_base/emo_large/'
x_train = np.load(path_base + 'hsf/emo_large_train.npy', allow_pickle=True)
x_test = np.load(path_base + 'hsf/emo_large_test.npy', allow_pickle=True)
# ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; it was only
# an alias for the builtin ``float``, so this cast is bit-for-bit equivalent.
x_train = np.vstack(x_train).astype(float)
x_test = np.vstack(x_test).astype(float)
# label
y_train = np.load(path_base + '../data_sti/y_train_sti1.npy')
y_test = np.load(path_base + '../data_sti/y_test_sti1.npy')
def dense_model():
    """Build the emotion classifier: BatchNorm -> 3 x Dense(256, relu) -> Dense(4, softmax).

    The input is a 6552-dimensional feature vector (emo_large HSF); the output
    is a softmax over the 4 emotion classes.
    """
    inputs = tf.keras.Input(shape=(6552,))
    hidden = tf.keras.layers.BatchNormalization()(inputs)
    # Three identical fully-connected hidden layers.
    for _ in range(3):
        hidden = tf.keras.layers.Dense(256, activation='relu')(hidden)
    outputs = tf.keras.layers.Dense(4, activation='softmax')(hidden)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
# model compilation
model = dense_model()
print(model.summary())
# callbacks, for dense better not to use best weights
# NOTE(review): `callback` is built but never passed to model.fit below
# (the callbacks argument is commented out), so early stopping is inactive.
callback = tf.keras.callbacks.EarlyStopping(monitor='loss',
                                 # restore_best_weights=True,
                                 patience=10)
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    optimizer=tf.keras.optimizers.Adam(),
    metrics=["accuracy"],
)
# Repeat fit/evaluate 30 times and report mean +/- std of test accuracy.
# NOTE(review): the model is NOT re-initialized between repetitions, so each
# iteration continues training the same weights -- confirm this is intended.
acc_avg = []
for i in range(0, 30):
    history = model.fit(x_train,
                        y_train,
                        batch_size=1024,
                        epochs=25,
                        # callbacks=[callback],
                        validation_split=0.2)
    # test the model
    test_scores = model.evaluate(x_test, y_test, verbose=2)
    acc_avg.append(test_scores[1])  # index 1 is the 'accuracy' metric
    # print("Test accuracy:", test_scores[1])
print("Test accuracy:", np.mean(acc_avg), ' +/- ', np.std(acc_avg))
| [
"tensorflow.random.set_seed",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"numpy.load",
"numpy.random.seed",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"numpy.std",
"tensorflow.keras.Input",
"tensorflow.keras.Model",
"numpy.mean",
"random.seed",
... | [((323, 346), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(221)'], {}), '(221)\n', (341, 346), True, 'import tensorflow as tf\n'), ((362, 378), 'random.seed', 'random.seed', (['(221)'], {}), '(221)\n', (373, 378), False, 'import random\n'), ((379, 398), 'numpy.random.seed', 'np.random.seed', (['(221)'], {}), '(221)\n', (393, 398), True, 'import numpy as np\n'), ((499, 564), 'numpy.load', 'np.load', (["(path_base + 'hsf/emo_large_train.npy')"], {'allow_pickle': '(True)'}), "(path_base + 'hsf/emo_large_train.npy', allow_pickle=True)\n", (506, 564), True, 'import numpy as np\n'), ((574, 638), 'numpy.load', 'np.load', (["(path_base + 'hsf/emo_large_test.npy')"], {'allow_pickle': '(True)'}), "(path_base + 'hsf/emo_large_test.npy', allow_pickle=True)\n", (581, 638), True, 'import numpy as np\n'), ((749, 800), 'numpy.load', 'np.load', (["(path_base + '../data_sti/y_train_sti1.npy')"], {}), "(path_base + '../data_sti/y_train_sti1.npy')\n", (756, 800), True, 'import numpy as np\n'), ((810, 860), 'numpy.load', 'np.load', (["(path_base + '../data_sti/y_test_sti1.npy')"], {}), "(path_base + '../data_sti/y_test_sti1.npy')\n", (817, 860), True, 'import numpy as np\n'), ((1504, 1565), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""loss"""', 'patience': '(10)'}), "(monitor='loss', patience=10)\n", (1536, 1565), True, 'import tensorflow as tf\n'), ((894, 923), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(6552,)'}), '(shape=(6552,))\n', (908, 923), True, 'import tensorflow as tf\n'), ((1305, 1351), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (1319, 1351), True, 'import tensorflow as tf\n'), ((2305, 2321), 'numpy.mean', 'np.mean', (['acc_avg'], {}), '(acc_avg)\n', (2312, 2321), True, 'import numpy as np\n'), ((2332, 2347), 'numpy.std', 'np.std', (['acc_avg'], {}), '(acc_avg)\n', (2338, 2347), True, 'import numpy 
as np\n'), ((650, 668), 'numpy.vstack', 'np.vstack', (['x_train'], {}), '(x_train)\n', (659, 668), True, 'import numpy as np\n'), ((695, 712), 'numpy.vstack', 'np.vstack', (['x_test'], {}), '(x_test)\n', (704, 712), True, 'import numpy as np\n'), ((932, 968), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (966, 968), True, 'import tensorflow as tf\n'), ((985, 1030), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1006, 1030), True, 'import tensorflow as tf\n'), ((1042, 1087), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1063, 1087), True, 'import tensorflow as tf\n'), ((1099, 1144), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1120, 1144), True, 'import tensorflow as tf\n'), ((1243, 1289), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(4)'], {'activation': '"""softmax"""'}), "(4, activation='softmax')\n", (1264, 1289), True, 'import tensorflow as tf\n'), ((1709, 1773), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (1754, 1773), True, 'import tensorflow as tf\n'), ((1789, 1815), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (1813, 1815), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
import numpy as np
import math
from multi_link_common import *
#height is probably 0 from multi_link_common.py
#total mass and total length are also defined in multi_link_common.py
# Five identical links sharing the totals defined in multi_link_common
# (total_length, total_mass, height come from the star import above).
num_links = 5.0
link_length = total_length/num_links
link_mass = total_mass/num_links
# End effector sits at the tip of the fully extended chain (along -y).
ee_location = np.matrix([0., -total_length, height]).T
#bod_shapes = ['cube', 'cube', 'cube', 'cube', 'cube']
bod_shapes = ['capsule', 'capsule', 'capsule', 'capsule', 'capsule']
bod_dimensions = [[0.03, 0.03, link_length]]*5
# Center of mass of link i sits at the middle of that link along -y.
bod_com_position = [[0., -link_length/2., height],
                    [0., -3.0/2.0*link_length, height],
                    [0., -5.0/2.0*link_length, height],
                    [0., -7.0/2.0*link_length, height],
                    [0., -9.0/2.0*link_length, height]]
bod_color = [[0.4, 0.4, 0.4, 1], [0.8, 0.8, 0.8, 1], [0.33, 0.33, 0.33, 1], [0.5, 0.5, 0.5, 1], [0.1, 0.1, 0.1, 1]]
bod_num_links = 5
bod_mass = [link_mass]*bod_num_links
bod_names = ['link1', 'link2', 'link3', 'link4', 'link5']
bodies = {'shapes':bod_shapes, 'dim':bod_dimensions, 'num_links':bod_num_links,
          'com_pos':bod_com_position, 'mass':bod_mass, 'name':bod_names, 'color':bod_color}
# Revolute joints, all rotating about +z, anchored at the link boundaries.
# NOTE: b_jt_axis was previously defined twice with identical values; the
# redundant second definition has been removed.
b_jt_axis = [[0.,0.,1.],[0.,0.,1.], [0.,0.,1.], [0.,0.,1.],[0.,0.,1.]]
b_jt_anchor = [[0., 0., height],
               [0., -link_length, height],
               [0., -2*link_length, height],
               [0., -3*link_length, height],
               [0., -4*link_length, height]]
# Per-joint stiffness (kp) and damping (kd) gains, decreasing toward the tip.
b_jt_kp = [30., 19., 10., 5., 2.2]
b_jt_kd = [7., 5., 3., 2., 1.]
b_jt_limits_max = np.radians([180, 120, 120, 120, 120]).tolist()
b_jt_limits_min = np.radians([-180, -120, -120, -120, -120]).tolist()
# Kinematic chain: joint i attaches body pairs [child, parent]; -1 is the world.
b_jt_attach = [[0, -1], [1, 0], [2,1], [3,2], [4,3]]
b_jt_start = [-1.52, 0.447, 1.17, 1.52, 0.637] #(gives ee pos of [0, -0.2, 0]
b_jts = {'anchor':b_jt_anchor, 'axis':b_jt_axis, 'jt_lim_max':b_jt_limits_max,
         'jt_lim_min':b_jt_limits_min, 'jt_init':b_jt_start, 'jt_attach':b_jt_attach,
         'jt_stiffness':b_jt_kp, 'jt_damping':b_jt_kd}
| [
"numpy.radians",
"numpy.matrix"
] | [((306, 345), 'numpy.matrix', 'np.matrix', (['[0.0, -total_length, height]'], {}), '([0.0, -total_length, height])\n', (315, 345), True, 'import numpy as np\n'), ((1572, 1609), 'numpy.radians', 'np.radians', (['[180, 120, 120, 120, 120]'], {}), '([180, 120, 120, 120, 120])\n', (1582, 1609), True, 'import numpy as np\n'), ((1637, 1679), 'numpy.radians', 'np.radians', (['[-180, -120, -120, -120, -120]'], {}), '([-180, -120, -120, -120, -120])\n', (1647, 1679), True, 'import numpy as np\n')] |
"""
Authors: <NAME>, <NAME> and <NAME>
All rights reserved, 2017.
"""
from collections import defaultdict, namedtuple
import torch
from skcuda import cublas, cufft
from pynvrtc.compiler import Program
import numpy as np
from cupy.cuda.function import Module
from cupy.cuda import device
from string import Template
# Minimal stand-in for a CUDA stream handle: kernel launches only need the raw pointer.
Stream = namedtuple('Stream', ['ptr'])
def get_dtype(t):
    """Map a CUDA tensor to the C scalar type name used in generated kernels.

    Returns 'float' or 'double' for the two supported CUDA tensor types, and
    falls through to None for anything else (matching the original behaviour).
    """
    for cuda_type, c_name in ((torch.cuda.FloatTensor, 'float'),
                              (torch.cuda.DoubleTensor, 'double')):
        if isinstance(t, cuda_type):
            return c_name
    return None
def get_compute_arch(t):
    """Return the nvrtc arch flag (e.g. 'compute_70') for the current CUDA device.

    The argument is accepted for API compatibility but not consulted; the
    capability is read from the active cupy device.
    """
    capability = device.Device().compute_capability
    return 'compute_{}'.format(capability)
def is_complex(input):
    """A tensor is 'complex' here when its trailing dimension holds (real, imag) pairs."""
    last_dim = input.size(-1)
    return last_dim == 2
class Periodize(object):
    """
    Wrapper around the CUDA periodization kernels.

    Downsamples a complex (H, W, 2) spatial map by averaging over k x k cells.
    Compiled kernels are cached per (input size, output size, device).
    """
    def __init__(self, jit=True):
        # One cache slot per (input size, output size, device); the defaultdict
        # yields None for kernels that have not been compiled yet.
        self.periodize_cache = defaultdict(lambda: None)
        # CUDA thread-block dimensions used for every launch.
        self.block = (32, 32, 1)
        # When False (or for CPU tensors), fall back to a pure-torch reduction.
        self.jit = jit
    def GET_BLOCKS(self, N, threads):
        # Ceiling division: number of blocks needed to cover N items.
        return (N + threads - 1) // threads
    def __call__(self, input, k):
        # Output holds the k-fold downsampled complex map.
        out = input.new(
            input.size(0),
            input.size(1),
            input.size(2) // k,
            input.size(3) // k,
            2
        )
        # CPU / no-JIT fallback: reshape into k x k cells and average them.
        if not self.jit or isinstance(
            input,
            (torch.FloatTensor, torch.DoubleTensor)
        ):
            y = input.view(
                input.size(0),
                input.size(1),
                input.size(2) // out.size(2),
                out.size(2),
                input.size(3) // out.size(3),
                out.size(3),
                2
            )
            out = y.mean(4).squeeze(4).mean(2).squeeze(2)
            return out
        if not is_complex(input):
            raise TypeError('The input and outputs should be complex')
        input = input.contiguous()
        # Compile-and-cache path: build the kernel on first use for this
        # (input size, output size, device) combination.
        if not self.periodize_cache[
            (
                input.size(),
                out.size(),
                input.get_device()
            )
        ]:
            # B: batch count (complex planes); H, W: spatial extent.
            B = input.nelement() // (2*input.size(-2) * input.size(-3))
            H = input.size(-3)
            W = input.size(-2)
            # NOTE: k is recomputed from the tensor sizes here, shadowing the
            # argument -- both values agree by construction of `out`.
            k = input.size(-2) // out.size(-2)
            # Each output pixel averages a k x k cell of the input, per batch slice.
            kernel = '''
            #define NW ${W} / ${k}
            #define NH ${H} / ${k}
            extern "C"
            __global__ void periodize(const ${Dtype}2 *input, ${Dtype}2 *output)
            {
              int tx = blockIdx.x * blockDim.x + threadIdx.x;
              int ty = blockIdx.y * blockDim.y + threadIdx.y;
              int tz = blockIdx.z * blockDim.z + threadIdx.z;
              if(tx >= NW || ty >= NH || tz >= ${B})
                return;
              input += tz * ${H} * ${W} + ty * ${W} + tx;
              ${Dtype}2 res = make_${Dtype}2(0.f, 0.f);
              for (int j=0; j<${k}; ++j)
                for (int i=0; i<${k}; ++i)
                {
                    const ${Dtype}2 &c = input[j * NH * ${W} + i * NW];
                    res.x += c.x;
                    res.y += c.y;
                }
              res.x /= ${k} * ${k};
              res.y /= ${k} * ${k};
              output[tz * NH * NW + ty * NW + tx] = res;
            }
            '''
            # Substitute the compile-time constants into the CUDA source.
            kernel = Template(kernel).substitute(
                B=B,
                H=H,
                W=W,
                k=k,
                Dtype=get_dtype(input)
            )
            name = '-'.join(
                [
                    str(input.get_device()),
                    str(B),
                    str(k),
                    str(H),
                    str(W),
                    'periodize.cu'
                ]
            )
            print(name)
            # JIT-compile to PTX with nvrtc and load it as a cupy module.
            prog = Program(kernel.encode('utf-8'), name.encode('utf-8'))
            ptx = prog.compile(
                [
                    ('-arch='+get_compute_arch(input)).encode('utf-8')
                ]
            )
            module = Module()
            module.load(bytes(ptx.encode()))
            self.periodize_cache[
                (
                    input.size(),
                    out.size(),
                    input.get_device()
                )
            ] = module
        # Launch grid covering (H_out, W_out, batch) with the fixed block shape.
        grid = (
            self.GET_BLOCKS(
                out.size(-3),
                self.block[0]
            ),
            self.GET_BLOCKS(
                out.size(-2),
                self.block[1]
            ),
            self.GET_BLOCKS(
                out.nelement() // (2*out.size(-2) * out.size(-3)),
                self.block[2]
            )
        )
        periodize = self.periodize_cache[
            (
                input.size(),
                out.size(),
                input.get_device()
            )
        ].get_function('periodize')
        periodize(
            grid=grid,
            block=self.block,
            args=[input.data_ptr(), out.data_ptr()],
            stream=Stream(ptr=torch.cuda.current_stream().cuda_stream)
        )
        return out
class Modulus(object):
    """
    Wrapper around the complex-modulus CUDA kernel, with a per-device cache.

    Maps each complex entry (re, im) to (|re + i*im|, 0).
    """
    def __init__(self, jit=True):
        # One compiled module per device; None until first use.
        self.modulus_cache = defaultdict(lambda: None)
        self.CUDA_NUM_THREADS = 1024
        # When False (or for non-CUDA-float input), use the pure-torch fallback.
        self.jit = jit
    def GET_BLOCKS(self, N):
        # Ceiling division: number of thread blocks needed to cover N items.
        return (N + self.CUDA_NUM_THREADS - 1) // self.CUDA_NUM_THREADS
    def __call__(self, input):
        # Fallback path: 2-norm over the trailing (re, im) axis, then
        # re-append a zero imaginary part so the output stays "complex".
        if not self.jit or not isinstance(input, torch.cuda.FloatTensor):
            norm = input.norm(2, input.dim() - 1)
            return torch.cat(
                [norm, norm.new(norm.size()).zero_()],
                input.dim() - 1
            )
        out = input.new(input.size())
        input = input.contiguous()
        if not is_complex(input):
            raise TypeError('The input and outputs should be complex')
        # JIT-compile the kernel once per device and cache the module.
        if self.modulus_cache[input.get_device()] is None:
            kernel = """
            extern "C"
            __global__ void abs_complex_value(const float * x, float2 * z, int n)
            {
                int i = blockIdx.x * blockDim.x + threadIdx.x;
            if (i >= n)
                return;
            z[i] = make_float2(normf(2, x + 2*i), 0);
            }
            """.encode('utf-8')
            print('modulus.cu')
            prog = Program(kernel, 'modulus.cu'.encode('utf-8'))
            ptx = prog.compile(
                [
                    ('-arch='+get_compute_arch(input)).encode('utf-8')
                ]
            )
            module = Module()
            module.load(bytes(ptx.encode()))
            self.modulus_cache[input.get_device()] = module
        fabs = self.modulus_cache[input.get_device()].get_function(
            'abs_complex_value'
        )
        # One thread per complex element (nelement // 2).
        fabs(
            grid=(
                self.GET_BLOCKS(int(out.nelement())//2),
                1,
                1
            ),
            block=(
                self.CUDA_NUM_THREADS,
                1,
                1
            ),
            args=[input.data_ptr(), out.data_ptr(), out.numel() // 2],
            stream=Stream(
                ptr=torch.cuda.current_stream().cuda_stream
            )
        )
        return out
class Fft(object):
    """
    Wrapper around cuFFT plans with a per-(size, type, device) cache.

    Works purely on "complex" tensors (trailing dimension of size 2).
    The FFTs are UNNORMALIZED; the CPU fallback multiplies inverse
    transforms by H*W to match cuFFT's convention.
    """
    def __init__(self):
        # Cached cufft plans keyed by (input size, transform type, device).
        self.fft_cache = defaultdict(lambda: None)
    def buildCache(self, input, _type):
        # k indexes the first spatial dimension; the transform is 2-D over
        # the last two dims before the (re, im) axis.
        k = input.ndimension() - 3
        n = np.asarray([input.size(k), input.size(k+1)], np.int32)
        # Number of independent 2-D planes in the batch.
        batch = input.nelement() // (2*input.size(k) * input.size(k + 1))
        # Contiguous layout: unit stride, planes packed back to back.
        idist = input.size(k) * input.size(k + 1)
        istride = 1
        ostride = istride
        odist = idist
        rank = 2
        plan = cufft.cufftPlanMany(
            rank,
            n.ctypes.data,
            n.ctypes.data,
            istride,
            idist,
            n.ctypes.data,
            ostride,
            odist,
            _type,
            batch
        )
        self.fft_cache[(input.size(), _type, input.get_device())] = plan
    def __del__(self):
        # Destroy every cached plan; errors are deliberately swallowed
        # because interpreter teardown may have already torn down cufft.
        for keys in self.fft_cache:
            try:
                cufft.cufftDestroy(self.fft_cache[keys])
            except:
                pass
    def __call__(self, input, direction='C2C', inplace=False, inverse=False):
        # A complex-to-real transform is necessarily an inverse transform.
        if direction == 'C2R':
            inverse = True
        if not isinstance(input, torch.cuda.FloatTensor):
            if not isinstance(input, (torch.FloatTensor, torch.DoubleTensor)):
                raise(TypeError('The input should be a torch.cuda.FloatTensor, \
                torch.FloatTensor or a torch.DoubleTensor'))
            else:
                # CPU fallback via numpy: rebuild the complex array from the
                # stacked (re, im) planes, then scale inverse results by H*W
                # to mimic cuFFT's unnormalized convention.
                input_np = input[..., 0].numpy() + 1.0j * input[..., 1].numpy()
                out_type = input.numpy().dtype
                if direction == 'C2R':
                    return torch.from_numpy(
                        np.real(
                            np.fft.ifft2(input_np)
                        ).astype(out_type) * input.size(-2) * input.size(-3)
                    )
                if inplace:
                    return input.copy_(
                        torch.from_numpy(
                            stack_complex(
                                np.fft.ifft2(input_np)
                            ).astype(out_type)*input.size(-2)*input.size(-3)
                            if inverse
                            else stack_complex(
                                np.fft.fft2(input_np)
                            ).astype(out_type)
                        )
                    )
                else:
                    return torch.from_numpy(
                        stack_complex(
                            np.fft.ifft2(input_np)
                        ).astype(out_type)*input.size(-2)*input.size(-3)
                        if inverse
                        else stack_complex(
                            np.fft.fft2(input_np)
                        ).astype(out_type)
                    )
        if not is_complex(input):
            raise TypeError(
                'The input should be complex (e.g. last dimension is 2)'
            )
        if not input.is_contiguous():
            raise RuntimeError('Tensors must be contiguous!')
        if direction == 'C2R':
            # Real output drops the trailing (re, im) axis.
            output = input.new(input.size()[:-1])
            if self.fft_cache[
                (
                    input.size(),
                    cufft.CUFFT_C2R,
                    input.get_device()
                )
            ] is None:
                self.buildCache(input, cufft.CUFFT_C2R)
            cufft.cufftExecC2R(
                self.fft_cache[
                    (
                        input.size(),
                        cufft.CUFFT_C2R,
                        input.get_device()
                    )
                ],
                input.data_ptr(),
                output.data_ptr()
            )
            return output
        elif direction == 'C2C':
            output = input.new(input.size()) if not inplace else input
            flag = cufft.CUFFT_INVERSE if inverse else cufft.CUFFT_FORWARD
            if self.fft_cache[
                (
                    input.size(),
                    cufft.CUFFT_C2C,
                    input.get_device()
                )
            ] is None:
                self.buildCache(input, cufft.CUFFT_C2C)
            cufft.cufftExecC2C(
                self.fft_cache[
                    (
                        input.size(),
                        cufft.CUFFT_C2C,
                        input.get_device()
                    )
                ],
                input.data_ptr(),
                output.data_ptr(),
                flag
            )
            return output
def stack_complex(x):
    """Split a complex array into real/imag parts stacked along a new last axis.

    For an input of shape ``s`` the result has shape ``s + (2,)`` with
    ``out[..., 0]`` the real part and ``out[..., 1]`` the imaginary part.
    """
    new_axis = len(x.shape)
    parts = (np.real(x), np.imag(x))
    return np.stack(parts, axis=new_axis)
def cdgmm(A, B, jit=True, inplace=False):
    """Complex pointwise multiplication of a batch A by a single filter B.

    A has shape (..., H, W, 2) and B has shape (H, W, 2); the trailing axis
    holds (real, imag). On CUDA tensors with ``jit`` enabled the product is
    delegated to cuBLAS's cdgmm; otherwise it is computed with torch ops.
    With ``inplace`` the result is written back into A.
    """
    A = A.contiguous()
    B = B.contiguous()
    if A.size()[-3:] != B.size():
        raise RuntimeError(
            'The filters are not compatible for multiplication!'
        )
    if not (is_complex(A) and is_complex(B)):
        raise TypeError('The input, filter and output should be complex')
    if B.ndimension() != 3:
        raise RuntimeError('The filters must be simply a complex array!')
    if type(A) is not type(B):
        raise RuntimeError('A and B should be same type!')
    if not jit or isinstance(A, (torch.FloatTensor, torch.DoubleTensor)):
        # Pure-torch path: flatten each complex plane and apply
        # (a_r + i a_i)(b_r + i b_i) componentwise.
        plane = A.size(-2) * A.size(-3)
        real_a = A[..., 0].contiguous().view(-1, plane)
        imag_a = A[..., 1].contiguous().view(-1, plane)
        real_b = B[..., 0].contiguous().view(plane).unsqueeze(0).expand_as(imag_a)
        imag_b = B[..., 1].contiguous().view(plane).unsqueeze(0).expand_as(real_a)
        result = A.new(A.size())
        result[..., 0].copy_(real_a * real_b - imag_a * imag_b)
        result[..., 1].copy_(real_a * imag_b + imag_a * real_b)
        if inplace:
            return A.copy_(result)
        return result
    # cuBLAS path: treat A as n columns of m complex entries scaled by the
    # diagonal filter B ('l' = multiply from the left).
    result = A if inplace else A.new(A.size())
    m = B.nelement() // 2
    n = A.nelement() // B.nelement()
    handle = torch.cuda.current_blas_handle()
    cublas.cublasSetStream(
        handle,
        torch.cuda.current_stream()._as_parameter_
    )
    cublas.cublasCdgmm(
        handle,
        'l',
        m,
        n,
        A.data_ptr(),
        m,
        B.data_ptr(),
        1,
        result.data_ptr(),
        m
    )
    return result
| [
"torch.cuda.current_stream",
"skcuda.cufft.cufftPlanMany",
"collections.defaultdict",
"cupy.cuda.function.Module",
"numpy.imag",
"string.Template",
"cupy.cuda.device.Device",
"collections.namedtuple",
"numpy.real",
"torch.cuda.current_blas_handle",
"numpy.fft.fft2",
"numpy.fft.ifft2",
"skcud... | [((327, 356), 'collections.namedtuple', 'namedtuple', (['"""Stream"""', "['ptr']"], {}), "('Stream', ['ptr'])\n", (337, 356), False, 'from collections import defaultdict, namedtuple\n'), ((869, 895), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (880, 895), False, 'from collections import defaultdict, namedtuple\n'), ((5171, 5197), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (5182, 5197), False, 'from collections import defaultdict, namedtuple\n'), ((7442, 7468), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (7453, 7468), False, 'from collections import defaultdict, namedtuple\n'), ((7835, 7956), 'skcuda.cufft.cufftPlanMany', 'cufft.cufftPlanMany', (['rank', 'n.ctypes.data', 'n.ctypes.data', 'istride', 'idist', 'n.ctypes.data', 'ostride', 'odist', '_type', 'batch'], {}), '(rank, n.ctypes.data, n.ctypes.data, istride, idist, n.\n ctypes.data, ostride, odist, _type, batch)\n', (7854, 7956), False, 'from skcuda import cublas, cufft\n'), ((13474, 13506), 'torch.cuda.current_blas_handle', 'torch.cuda.current_blas_handle', ([], {}), '()\n', (13504, 13506), False, 'import torch\n'), ((577, 592), 'cupy.cuda.device.Device', 'device.Device', ([], {}), '()\n', (590, 592), False, 'from cupy.cuda import device\n'), ((3940, 3948), 'cupy.cuda.function.Module', 'Module', ([], {}), '()\n', (3946, 3948), False, 'from cupy.cuda.function import Module\n'), ((6513, 6521), 'cupy.cuda.function.Module', 'Module', ([], {}), '()\n', (6519, 6521), False, 'from cupy.cuda.function import Module\n'), ((11951, 11961), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (11958, 11961), True, 'import numpy as np\n'), ((11975, 11985), 'numpy.imag', 'np.imag', (['x'], {}), '(x)\n', (11982, 11985), True, 'import numpy as np\n'), ((8248, 8288), 'skcuda.cufft.cufftDestroy', 'cufft.cufftDestroy', (['self.fft_cache[keys]'], {}), '(self.fft_cache[keys])\n', (8266, 
8288), False, 'from skcuda import cublas, cufft\n'), ((13571, 13598), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (13596, 13598), False, 'import torch\n'), ((3231, 3247), 'string.Template', 'Template', (['kernel'], {}), '(kernel)\n', (3239, 3247), False, 'from string import Template\n'), ((4922, 4949), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (4947, 4949), False, 'import torch\n'), ((7108, 7135), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (7133, 7135), False, 'import torch\n'), ((10036, 10057), 'numpy.fft.fft2', 'np.fft.fft2', (['input_np'], {}), '(input_np)\n', (10047, 10057), True, 'import numpy as np\n'), ((9055, 9077), 'numpy.fft.ifft2', 'np.fft.ifft2', (['input_np'], {}), '(input_np)\n', (9067, 9077), True, 'import numpy as np\n'), ((9582, 9603), 'numpy.fft.fft2', 'np.fft.fft2', (['input_np'], {}), '(input_np)\n', (9593, 9603), True, 'import numpy as np\n'), ((9833, 9855), 'numpy.fft.ifft2', 'np.fft.ifft2', (['input_np'], {}), '(input_np)\n', (9845, 9855), True, 'import numpy as np\n'), ((9363, 9385), 'numpy.fft.ifft2', 'np.fft.ifft2', (['input_np'], {}), '(input_np)\n', (9375, 9385), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import csv
# Global font size for all figure text.
plt.rcParams.update({'font.size':18})
# Width of the shaded band, in multiples of the per-bin standard deviation.
coefficient = 0.75
# Each CSV is read row by row, converting entries to float; only the LAST row
# survives the loop (presumably each file contains a single 128-value row --
# TODO confirm against how these CSVs are produced).
with open('fake_fingerprint_mean.csv','r')as f:
    f_csv = csv.reader(f)
    for row1 in f_csv:
        row1 = [float(i) for i in row1]
with open('fake_fingerprint_std.csv','r')as f:
    f_csv = csv.reader(f)
    for row2 in f_csv:
        row2 = [float(i) for i in row2]
with open('real_fingerprint_mean.csv','r')as f:
    f_csv = csv.reader(f)
    for row3 in f_csv:
        row3 = [float(i) for i in row3]
with open('real_fingerprint_std.csv','r')as f:
    f_csv = csv.reader(f)
    for row4 in f_csv:
        row4 = [float(i) for i in row4]
# x axis: one point per frequency bin (0..127).
xf = np.arange(start=0,stop=128,step=1)
row_numpy1 = np.array(row1)  # fake mean
row_numpy2 = np.array(row2)  # fake std
row_numpy3 = np.array(row3)  # real mean
row_numpy4 = np.array(row4)  # real std
# Mean curves for fake vs real fingerprints.
plt.plot(xf,row_numpy1,color='green',linewidth=1.5,label='fake')
plt.plot(xf,row_numpy3,color='darkred',linewidth=1.5,label='real')
plt.legend(title='fingerprint')
# Shaded mean +/- coefficient*std bands.
plt.fill_between(xf,row_numpy1-coefficient*row_numpy2,row_numpy1+coefficient*row_numpy2,color='lime',alpha=0.35)
plt.fill_between(xf,row_numpy3-coefficient*row_numpy4,row_numpy3+coefficient*row_numpy4,color='red',alpha=0.35)
plt.show() | [
"matplotlib.pyplot.show",
"csv.reader",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.fill_between"
] | [((67, 105), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (86, 105), True, 'import matplotlib.pyplot as plt\n'), ((693, 729), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(128)', 'step': '(1)'}), '(start=0, stop=128, step=1)\n', (702, 729), True, 'import numpy as np\n'), ((742, 756), 'numpy.array', 'np.array', (['row1'], {}), '(row1)\n', (750, 756), True, 'import numpy as np\n'), ((771, 785), 'numpy.array', 'np.array', (['row2'], {}), '(row2)\n', (779, 785), True, 'import numpy as np\n'), ((800, 814), 'numpy.array', 'np.array', (['row3'], {}), '(row3)\n', (808, 814), True, 'import numpy as np\n'), ((829, 843), 'numpy.array', 'np.array', (['row4'], {}), '(row4)\n', (837, 843), True, 'import numpy as np\n'), ((845, 913), 'matplotlib.pyplot.plot', 'plt.plot', (['xf', 'row_numpy1'], {'color': '"""green"""', 'linewidth': '(1.5)', 'label': '"""fake"""'}), "(xf, row_numpy1, color='green', linewidth=1.5, label='fake')\n", (853, 913), True, 'import matplotlib.pyplot as plt\n'), ((911, 981), 'matplotlib.pyplot.plot', 'plt.plot', (['xf', 'row_numpy3'], {'color': '"""darkred"""', 'linewidth': '(1.5)', 'label': '"""real"""'}), "(xf, row_numpy3, color='darkred', linewidth=1.5, label='real')\n", (919, 981), True, 'import matplotlib.pyplot as plt\n'), ((979, 1010), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': '"""fingerprint"""'}), "(title='fingerprint')\n", (989, 1010), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1141), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xf', '(row_numpy1 - coefficient * row_numpy2)', '(row_numpy1 + coefficient * row_numpy2)'], {'color': '"""lime"""', 'alpha': '(0.35)'}), "(xf, row_numpy1 - coefficient * row_numpy2, row_numpy1 + \n coefficient * row_numpy2, color='lime', alpha=0.35)\n", (1028, 1141), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1254), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['xf', '(row_numpy3 - 
coefficient * row_numpy4)', '(row_numpy3 + coefficient * row_numpy4)'], {'color': '"""red"""', 'alpha': '(0.35)'}), "(xf, row_numpy3 - coefficient * row_numpy4, row_numpy3 + \n coefficient * row_numpy4, color='red', alpha=0.35)\n", (1142, 1254), True, 'import matplotlib.pyplot as plt\n'), ((1239, 1249), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1247, 1249), True, 'import matplotlib.pyplot as plt\n'), ((187, 200), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (197, 200), False, 'import csv\n'), ((327, 340), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (337, 340), False, 'import csv\n'), ((468, 481), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (478, 481), False, 'import csv\n'), ((608, 621), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (618, 621), False, 'import csv\n')] |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
# xmlrunner (XML test reports for CI) is optional.
try:
    import xmlrunner
except ImportError:
    xmlrunner = None
# Python <= 2.6 lacks the unittest features used below; require unittest2 there.
if sys.version_info[:2] <= (2, 6):
    try:
        import unittest2 as unittest
    except ImportError:
        sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
        sys.exit(1)
else:
    import unittest
# Python 2/3 compatibility aliases used throughout this module.
if sys.version_info[0] >= 3:
    xrange = range
    basestring = str
if sys.version >= "3":
    from io import StringIO
else:
    from StringIO import StringIO
from pyspark import keyword_only
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
from pyspark.taskcontext import BarrierTaskContext, TaskContext
# Optional scientific dependencies: tests that need SciPy/NumPy are skipped
# when the corresponding import fails. Only ImportError may be swallowed
# here -- the previous bare ``except:`` also hid unrelated failures such as
# KeyboardInterrupt or a broken installation raising another exception.
_have_scipy = False
_have_numpy = False
try:
    import scipy.sparse
    _have_scipy = True
except ImportError:
    # No SciPy, but that's okay, we'll skip those tests
    pass
try:
    import numpy as np
    _have_numpy = True
except ImportError:
    # No NumPy, but that's okay, we'll skip those tests
    pass
# The test suite requires SPARK_HOME to locate the Spark distribution under test.
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
    def setUp(self):
        # N = 4096 distinct keys; data pairs each key with itself.
        self.N = 1 << 12
        self.l = [i for i in xrange(self.N)]
        self.data = list(zip(self.l, self.l))
        # Aggregator that collects values into lists:
        # create -> [x]; merge value -> append; merge combiners -> extend.
        self.agg = Aggregator(lambda x: [x],
                          lambda x, y: x.append(y) or x,
                          lambda x, y: x.extend(y) or x)
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 20)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
def test_stopiteration_is_raised(self):
def stopit(*args, **kwargs):
raise StopIteration()
def legit_create_combiner(x):
return [x]
def legit_merge_value(x, y):
return x.append(y) or x
def legit_merge_combiners(x, y):
return x.extend(y) or x
data = [(x % 2, x) for x in range(100)]
# wrong create combiner
m = ExternalMerger(Aggregator(stopit, legit_merge_value, legit_merge_combiners), 20)
with self.assertRaises((Py4JJavaError, RuntimeError)) as cm:
m.mergeValues(data)
# wrong merge value
m = ExternalMerger(Aggregator(legit_create_combiner, stopit, legit_merge_combiners), 20)
with self.assertRaises((Py4JJavaError, RuntimeError)) as cm:
m.mergeValues(data)
# wrong merge combiners
m = ExternalMerger(Aggregator(legit_create_combiner, legit_merge_value, stopit), 20)
with self.assertRaises((Py4JJavaError, RuntimeError)) as cm:
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), data))
class SorterTests(unittest.TestCase):
    """Tests for pyspark.shuffle.ExternalSorter: in-memory sorting, forced
    disk spilling, and external sort driven through an RDD job."""

    def test_in_memory_sort(self):
        # With a 1024 MB limit nothing should spill; results must match the
        # builtin sorted() for every key/reverse combination.
        l = list(range(1024))
        random.shuffle(l)
        sorter = ExternalSorter(1024)
        self.assertEqual(sorted(l), list(sorter.sorted(l)))
        self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
        self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
        self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
                         list(sorter.sorted(l, key=lambda x: -x, reverse=True)))

    def test_external_sort(self):
        class CustomizedSorter(ExternalSorter):
            # Keep the memory limit constant instead of growing it, so
            # every sort is forced to keep spilling to disk.
            def _next_limit(self):
                return self.memory_limit
        l = list(range(1024))
        random.shuffle(l)
        sorter = CustomizedSorter(1)
        self.assertEqual(sorted(l), list(sorter.sorted(l)))
        # Each sort below must strictly increase the module-global spill
        # counter, proving disk was actually used.
        self.assertGreater(shuffle.DiskBytesSpilled, 0)
        last = shuffle.DiskBytesSpilled
        self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
        self.assertGreater(shuffle.DiskBytesSpilled, last)
        last = shuffle.DiskBytesSpilled
        self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
        self.assertGreater(shuffle.DiskBytesSpilled, last)
        last = shuffle.DiskBytesSpilled
        self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
                         list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
        self.assertGreater(shuffle.DiskBytesSpilled, last)

    def test_external_sort_in_rdd(self):
        # A tiny worker memory budget forces sortBy to use external sort.
        conf = SparkConf().set("spark.python.worker.memory", "1m")
        sc = SparkContext(conf=conf)
        l = list(range(10240))
        random.shuffle(l)
        rdd = sc.parallelize(l, 4)
        self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
        sc.stop()
class SerializationTestCase(unittest.TestCase):
    """Tests for cloudpickle-based serialization of functions, callables,
    file handles, and the pyspark stream serializers."""

    def test_namedtuple(self):
        """namedtuple instances and classes must survive pickling."""
        from collections import namedtuple
        from pickle import dumps, loads
        P = namedtuple("P", "x y")
        p1 = P(1, 3)
        p2 = loads(dumps(p1, 2))
        self.assertEqual(p1, p2)

        # The class object itself must round-trip through cloudpickle too.
        from pyspark.cloudpickle import dumps
        P2 = loads(dumps(P))
        p3 = P2(1, 3)
        self.assertEqual(p1, p3)

    def test_itemgetter(self):
        """operator.itemgetter objects must round-trip through cloudpickle."""
        from operator import itemgetter
        ser = CloudPickleSerializer()
        d = range(10)
        getter = itemgetter(1)
        getter2 = ser.loads(ser.dumps(getter))
        self.assertEqual(getter(d), getter2(d))

        getter = itemgetter(0, 3)
        getter2 = ser.loads(ser.dumps(getter))
        self.assertEqual(getter(d), getter2(d))

    def test_function_module_name(self):
        """Deserialized functions must keep their original __module__."""
        ser = CloudPickleSerializer()
        func = lambda x: x
        func2 = ser.loads(ser.dumps(func))
        self.assertEqual(func.__module__, func2.__module__)

    def test_attrgetter(self):
        """operator.attrgetter objects (including dotted attribute paths)
        must round-trip through cloudpickle."""
        from operator import attrgetter
        ser = CloudPickleSerializer()

        class C(object):
            def __getattr__(self, item):
                return item
        d = C()
        getter = attrgetter("a")
        getter2 = ser.loads(ser.dumps(getter))
        self.assertEqual(getter(d), getter2(d))
        getter = attrgetter("a", "b")
        getter2 = ser.loads(ser.dumps(getter))
        self.assertEqual(getter(d), getter2(d))

        d.e = C()
        getter = attrgetter("e.a")
        getter2 = ser.loads(ser.dumps(getter))
        self.assertEqual(getter(d), getter2(d))
        getter = attrgetter("e.a", "e.b")
        getter2 = ser.loads(ser.dumps(getter))
        self.assertEqual(getter(d), getter2(d))

    # Regression test for SPARK-3415
    def test_pickling_file_handles(self):
        # to be corrected with SPARK-11160
        if not xmlrunner:
            ser = CloudPickleSerializer()
            out1 = sys.stderr
            out2 = ser.loads(ser.dumps(out1))
            self.assertEqual(out1, out2)

    def test_func_globals(self):
        """cloudpickle must only pickle the globals a function actually
        references, so an unrelated unpicklable global is harmless."""

        class Unpicklable(object):
            def __reduce__(self):
                raise Exception("not picklable")

        global exit
        exit = Unpicklable()
        try:
            ser = CloudPickleSerializer()
            # Pickling the unpicklable object itself must fail...
            self.assertRaises(Exception, lambda: ser.dumps(exit))

            def foo():
                sys.exit(0)
            self.assertTrue("exit" in foo.__code__.co_names)
            # ...but foo only names "exit" as an attribute of sys, not as a
            # global, so pickling foo must still succeed.
            ser.dumps(foo)
        finally:
            # Remove the module-level shadow so the builtin exit() and other
            # tests are unaffected.  (The original version leaked it.)
            del exit

    def test_compressed_serializer(self):
        """CompressedSerializer must append multiple batches to one stream
        and read them all back in order."""
        ser = CompressedSerializer(PickleSerializer())
        try:
            from StringIO import StringIO  # Python 2
        except ImportError:
            from io import BytesIO as StringIO  # Python 3
        io = StringIO()
        ser.dump_stream(["abc", u"123", range(5)], io)
        io.seek(0)
        self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
        ser.dump_stream(range(1000), io)
        io.seek(0)
        self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
        io.close()

    def test_hash_serializer(self):
        """All serializer variants must be hashable."""
        hash(NoOpSerializer())
        hash(UTF8Deserializer())
        hash(PickleSerializer())
        hash(MarshalSerializer())
        hash(AutoSerializer())
        hash(BatchedSerializer(PickleSerializer()))
        hash(AutoBatchedSerializer(MarshalSerializer()))
        hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
        hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
        hash(CompressedSerializer(PickleSerializer()))
        hash(FlattenedValuesSerializer(PickleSerializer()))
class QuietTest(object):
    """Context manager that raises the JVM log4j root-logger level to FATAL
    for the duration of a ``with`` block (silencing expected task-failure
    noise), then restores the previous level on exit."""

    def __init__(self, sc):
        self.log4j = sc._jvm.org.apache.log4j

    def _root_logger(self):
        # Looked up on each use; the object is a py4j proxy into the JVM.
        return self.log4j.LogManager.getRootLogger()

    def __enter__(self):
        root = self._root_logger()
        self.old_level = root.getLevel()
        root.setLevel(self.log4j.Level.FATAL)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._root_logger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
    """Base class that gives each test method a fresh local SparkContext
    and restores sys.path afterwards (addPyFile mutates it)."""

    def setUp(self):
        # Snapshot (copy, not alias) so tearDown can restore it exactly.
        self._old_sys_path = list(sys.path)
        class_name = self.__class__.__name__
        self.sc = SparkContext('local[4]', class_name)

    def tearDown(self):
        self.sc.stop()
        sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
    """Base class that shares one local SparkContext across all test
    methods of a subclass (created once per class, not once per test)."""

    @classmethod
    def setUpClass(cls):
        cls.sc = SparkContext('local[4]', cls.__name__)

    @classmethod
    def tearDownClass(cls):
        cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
    """Tests for reliable (on-disk) RDD checkpointing."""

    def setUp(self):
        # Reserve a unique temp path, then delete the file so Spark can
        # create the checkpoint directory at that path itself.
        self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(self.checkpointDir.name)
        self.sc.setCheckpointDir(self.checkpointDir.name)

    def tearDown(self):
        shutil.rmtree(self.checkpointDir.name)

    def test_basic_checkpointing(self):
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))

        # Before any action runs, nothing is checkpointed yet.
        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertTrue(flatMappedRDD.getCheckpointFile() is None)

        flatMappedRDD.checkpoint()
        result = flatMappedRDD.collect()
        time.sleep(1)  # 1 second; checkpoint is written after the job
        self.assertTrue(flatMappedRDD.isCheckpointed())
        # Data must be unchanged after recomputation from the checkpoint.
        self.assertEqual(flatMappedRDD.collect(), result)
        self.assertEqual("file:" + self.checkpointDir.name,
                         os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))

    def test_checkpoint_and_restore(self):
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: [x])

        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertTrue(flatMappedRDD.getCheckpointFile() is None)

        flatMappedRDD.checkpoint()
        flatMappedRDD.count()  # forces a checkpoint to be computed
        time.sleep(1)  # 1 second

        self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
        # Reload the checkpointed data directly from disk and verify it.
        recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
                                            flatMappedRDD._jrdd_deserializer)
        self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
    """Tests for local (executor-storage) checkpointing."""

    def test_basic_localcheckpointing(self):
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))

        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertFalse(flatMappedRDD.isLocallyCheckpointed())

        flatMappedRDD.localCheckpoint()
        result = flatMappedRDD.collect()
        time.sleep(1)  # 1 second; checkpoint is written after the job
        self.assertTrue(flatMappedRDD.isCheckpointed())
        self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
        self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
    """Tests for SparkContext.addFile / addPyFile distribution of files and
    Python dependencies to the driver and executors."""

    def test_add_py_file(self):
        # To ensure that we're actually testing addPyFile's effects, check that
        # this job fails due to `userlibrary` not being on the Python path:
        # disable logging in log4j temporarily
        def func(x):
            from userlibrary import UserClass
            return UserClass().hello()
        with QuietTest(self.sc):
            self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)

        # Add the file, so the job should now succeed:
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        res = self.sc.parallelize(range(2)).map(func).first()
        self.assertEqual("Hello World!", res)

    def test_add_file_locally(self):
        path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
        self.sc.addFile(path)

        # SparkFiles.get returns a downloaded copy, not the original path.
        download_path = SparkFiles.get("hello.txt")
        self.assertNotEqual(path, download_path)
        with open(download_path) as test_file:
            self.assertEqual("Hello World!\n", test_file.readline())

    def test_add_file_recursively_locally(self):
        # recursive=True distributes a whole directory tree.
        path = os.path.join(SPARK_HOME, "python/test_support/hello")
        self.sc.addFile(path, True)
        download_path = SparkFiles.get("hello")
        self.assertNotEqual(path, download_path)
        with open(download_path + "/hello.txt") as test_file:
            self.assertEqual("Hello World!\n", test_file.readline())
        with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
            self.assertEqual("Sub Hello World!\n", test_file.readline())

    def test_add_py_file_locally(self):
        # To ensure that we're actually testing addPyFile's effects, check that
        # this fails due to `userlibrary` not being on the Python path:
        def func():
            from userlibrary import UserClass
        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        # addPyFile also makes the module importable on the driver.
        from userlibrary import UserClass
        self.assertEqual("Hello World!", UserClass().hello())

    def test_add_egg_file_locally(self):
        # To ensure that we're actually testing addPyFile's effects, check that
        # this fails due to `userlibrary` not being on the Python path:
        def func():
            from userlib import UserClass
        self.assertRaises(ImportError, func)
        # Zipped packages (egg/zip) are supported as well as plain .py files.
        path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
        self.sc.addPyFile(path)
        from userlib import UserClass
        self.assertEqual("Hello World from inside a package!", UserClass().hello())

    def test_overwrite_system_module(self):
        # The added file's module (whose __name__ is "My Server" per the
        # assertion below) must shadow the stdlib SimpleHTTPServer module,
        # both on the driver and inside tasks.
        self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))

        import SimpleHTTPServer
        self.assertEqual("My Server", SimpleHTTPServer.__name__)

        def func(x):
            import SimpleHTTPServer
            return SimpleHTTPServer.__name__

        self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class TaskContextTests(PySparkTestCase):
    """Tests for TaskContext / BarrierTaskContext visibility inside tasks."""

    def setUp(self):
        self._old_sys_path = list(sys.path)
        class_name = self.__class__.__name__
        # Allow retries even though they are normally disabled in local mode
        self.sc = SparkContext('local[4, 2]', class_name)

    def test_stage_id(self):
        """Test the stage ids are available and incrementing as expected."""
        rdd = self.sc.parallelize(range(10))
        stage1 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
        stage2 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
        # Test using the constructor directly rather than the get()
        stage3 = rdd.map(lambda x: TaskContext().stageId()).take(1)[0]
        self.assertEqual(stage1 + 1, stage2)
        self.assertEqual(stage1 + 2, stage3)
        self.assertEqual(stage2 + 1, stage3)

    def test_partition_id(self):
        """Test the partition id."""
        rdd1 = self.sc.parallelize(range(10), 1)
        rdd2 = self.sc.parallelize(range(10), 2)
        pids1 = rdd1.map(lambda x: TaskContext.get().partitionId()).collect()
        pids2 = rdd2.map(lambda x: TaskContext.get().partitionId()).collect()
        self.assertEqual(0, pids1[0])
        self.assertEqual(0, pids1[9])
        self.assertEqual(0, pids2[0])
        self.assertEqual(1, pids2[9])

    def test_attempt_number(self):
        """Verify the attempt numbers are correctly reported."""
        rdd = self.sc.parallelize(range(10))
        # Verify a simple job with no failures.  NOTE: the original used
        # map() for these assertions, which is lazy on Python 3 and never
        # executed them; explicit loops actually run the checks.
        attempt_numbers = rdd.map(lambda x: TaskContext.get().attemptNumber()).collect()
        for attempt_number in attempt_numbers:
            self.assertEqual(0, attempt_number)

        def fail_on_first(x):
            """Fail on the first attempt so we get a positive attempt number"""
            tc = TaskContext.get()
            attempt_number = tc.attemptNumber()
            partition_id = tc.partitionId()
            attempt_id = tc.taskAttemptId()
            if attempt_number == 0 and partition_id == 0:
                raise Exception("Failing on first attempt")
            else:
                return [x, partition_id, attempt_number, attempt_id]
        result = rdd.map(fail_on_first).collect()
        # We should re-submit the first partition to it but other partitions should be attempt 0
        self.assertEqual([0, 0, 1], result[0][0:3])
        self.assertEqual([9, 3, 0], result[9][0:3])
        # Partition 0 was retried exactly once; all other partitions ran on
        # attempt 0.  (Again: lazy filter()/map() in the original skipped
        # these assertions on Python 3.)
        for elem in result:
            if elem[1] == 0:
                self.assertEqual(1, elem[2])
            else:
                self.assertEqual(0, elem[2])
        # The task attempt id should be different
        self.assertTrue(result[0][3] != result[9][3])

    def test_tc_on_driver(self):
        """Verify that getting the TaskContext on the driver returns None."""
        tc = TaskContext.get()
        self.assertTrue(tc is None)

    def test_get_local_property(self):
        """Verify that local properties set on the driver are available in TaskContext."""
        key = "testkey"
        value = "testvalue"
        self.sc.setLocalProperty(key, value)
        try:
            rdd = self.sc.parallelize(range(1), 1)
            prop1 = rdd.map(lambda _: TaskContext.get().getLocalProperty(key)).collect()[0]
            self.assertEqual(prop1, value)
            prop2 = rdd.map(lambda _: TaskContext.get().getLocalProperty("otherkey")).collect()[0]
            self.assertTrue(prop2 is None)
        finally:
            self.sc.setLocalProperty(key, None)

    def test_barrier(self):
        """
        Verify that BarrierTaskContext.barrier() performs global sync among all barrier tasks
        within a stage.
        """
        rdd = self.sc.parallelize(range(10), 4)

        def f(iterator):
            yield sum(iterator)

        def context_barrier(x):
            tc = BarrierTaskContext.get()
            # Random per-task delay; after barrier() all tasks should
            # proceed (and finish) nearly simultaneously regardless.
            time.sleep(random.randint(1, 10))
            tc.barrier()
            return time.time()

        times = rdd.barrier().mapPartitions(f).map(context_barrier).collect()
        self.assertTrue(max(times) - min(times) < 1)

    def test_barrier_infos(self):
        """
        Verify that BarrierTaskContext.getTaskInfos() returns a list of all task infos in the
        barrier stage.
        """
        rdd = self.sc.parallelize(range(10), 4)

        def f(iterator):
            yield sum(iterator)

        taskInfos = rdd.barrier().mapPartitions(f).map(lambda x: BarrierTaskContext.get()
                                                       .getTaskInfos()).collect()
        self.assertTrue(len(taskInfos) == 4)
        self.assertTrue(len(taskInfos[0]) == 4)
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
    """sc.range must honor start/stop/step, including empty, negative-step
    and very large ranges."""
    self.assertEqual(self.sc.range(1, 1).count(), 0)
    self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
    self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)

def test_id(self):
    """RDD ids are stable for an RDD and advance as new RDDs are created."""
    rdd = self.sc.parallelize(range(10))
    id = rdd.id()
    self.assertEqual(id, rdd.id())
    rdd2 = rdd.map(str).filter(bool)
    id2 = rdd2.id()
    # The id advances by exactly 1 — presumably map+filter are pipelined
    # into a single new RDD (TODO confirm against RDD implementation).
    self.assertEqual(id + 1, id2)
    self.assertEqual(id2, rdd2.id())

def test_empty_rdd(self):
    """emptyRDD() must report itself as empty."""
    rdd = self.sc.emptyRDD()
    self.assertTrue(rdd.isEmpty())

def test_sum(self):
    """sum() must work on both empty and non-empty RDDs."""
    self.assertEqual(0, self.sc.emptyRDD().sum())
    self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
    """toLocalIterator must still yield every element when consumption is
    delayed (sleeps simulate a slow consumer)."""
    from time import sleep
    rdd = self.sc.parallelize([1, 2, 3])
    it = rdd.toLocalIterator()
    sleep(5)
    self.assertEqual([1, 2, 3], sorted(it))

    # Also with many (mostly empty) partitions.
    rdd2 = rdd.repartition(1000)
    it2 = rdd2.toLocalIterator()
    sleep(5)
    self.assertEqual([1, 2, 3], sorted(it2))

def test_save_as_textfile_with_unicode(self):
    # Regression test for SPARK-970: unicode strings must be written as
    # UTF-8 text.
    x = u"\u00A1Hola, mundo!"
    data = self.sc.parallelize([x])
    tempFile = tempfile.NamedTemporaryFile(delete=True)
    tempFile.close()
    data.saveAsTextFile(tempFile.name)
    # Output is split over part files; concatenate them before comparing.
    raw_contents = b''.join(open(p, 'rb').read()
                            for p in glob(tempFile.name + "/part-0000*"))
    self.assertEqual(x, raw_contents.strip().decode("utf-8"))

def test_save_as_textfile_with_utf8(self):
    # Same as above, but the element is already UTF-8-encoded bytes.
    x = u"\u00A1Hola, mundo!"
    data = self.sc.parallelize([x.encode("utf-8")])
    tempFile = tempfile.NamedTemporaryFile(delete=True)
    tempFile.close()
    data.saveAsTextFile(tempFile.name)
    raw_contents = b''.join(open(p, 'rb').read()
                            for p in glob(tempFile.name + "/part-0000*"))
    self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
    # Regression test for SPARK-1034: mapping over a cartesian product
    # must not crash; the collected result itself is not asserted on.
    rdd1 = self.sc.parallelize([1, 2])
    rdd2 = self.sc.parallelize([3, 4])
    cart = rdd1.cartesian(rdd2)
    result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()

def test_transforming_pickle_file(self):
    # Regression test for SPARK-2601: transforming an RDD loaded from a
    # pickle file must not crash.
    data = self.sc.parallelize([u"Hello", u"World!"])
    tempFile = tempfile.NamedTemporaryFile(delete=True)
    tempFile.close()
    data.saveAsPickleFile(tempFile.name)
    pickled_file = self.sc.pickleFile(tempFile.name)
    pickled_file.map(lambda x: x).collect()

def test_cartesian_on_textfile(self):
    # Regression test (JIRA id missing in the original comment):
    # cartesian of a text file with itself must yield decoded strings.
    path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
    a = self.sc.textFile(path)
    result = a.cartesian(a).collect()
    (x, y) = result[0]
    self.assertEqual(u"Hello World!", x.strip())
    self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
    # Tests for SPARK-16589: chained/nested cartesian products must
    # enumerate the full cross product regardless of nesting order.
    rdd = self.sc.parallelize(range(10), 2)
    self.assertSetEqual(
        set(rdd.cartesian(rdd).cartesian(rdd).collect()),
        set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
    )

    self.assertSetEqual(
        set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
        set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
    )

    self.assertSetEqual(
        set(rdd.cartesian(rdd.zip(rdd)).collect()),
        set([(x, (y, y)) for x in range(10) for y in range(10)])
    )

def test_zip_chaining(self):
    # Tests for SPARK-21985: zipping an RDD with (a zip of) itself.
    rdd = self.sc.parallelize('abc', 2)
    self.assertSetEqual(
        set(rdd.zip(rdd).zip(rdd).collect()),
        set([((x, x), x) for x in 'abc'])
    )
    self.assertSetEqual(
        set(rdd.zip(rdd.zip(rdd)).collect()),
        set([(x, (x, x)) for x in 'abc'])
    )
def test_deleting_input_files(self):
    # Regression test for SPARK-1025: deleting the input file after the
    # first action must make later recomputation fail with an exception.
    tempFile = tempfile.NamedTemporaryFile(delete=False)
    tempFile.write(b"Hello World!")
    tempFile.close()
    data = self.sc.textFile(tempFile.name)
    filtered_data = data.filter(lambda x: True)
    self.assertEqual(1, filtered_data.count())
    os.unlink(tempFile.name)
    with QuietTest(self.sc):
        self.assertRaises(Exception, lambda: filtered_data.count())

def test_sampling_default_seed(self):
    # Test for SPARK-3995 (default seed setting): takeSample must work
    # without an explicit seed.
    data = self.sc.parallelize(xrange(1000), 1)
    subset = data.takeSample(False, 10)
    self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
    # Test for SPARK-9021; uses aggregate and treeAggregate to build dict
    # representing a counter of ints.  The zero value is mutable, so this
    # checks that Spark does not share/mutate it across partitions.
    # NOTE: dict is used instead of collections.Counter for Python 2.6
    # compatibility
    from collections import defaultdict

    # Show that single or multiple partitions work
    data1 = self.sc.range(10, numSlices=1)
    data2 = self.sc.range(10, numSlices=2)

    def seqOp(x, y):
        x[y] += 1
        return x

    def comboOp(x, y):
        for key, val in y.items():
            x[key] += val
        return x

    counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
    counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
    counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
    counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)

    ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
    self.assertEqual(counts1, ground_truth)
    self.assertEqual(counts2, ground_truth)
    self.assertEqual(counts3, ground_truth)
    self.assertEqual(counts4, ground_truth)

def test_aggregate_by_key_mutable_zero_value(self):
    # Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
    # contains lists of all values for each key in the original RDD

    # list(range(...)) for Python 3.x compatibility (can't use * operator
    # on a range object)
    # list(zip(...)) for Python 3.x compatibility (want to parallelize a
    # collection, not a zip object)
    tuples = list(zip(list(range(10))*2, [1]*20))
    # Show that single or multiple partitions work
    data1 = self.sc.parallelize(tuples, 1)
    data2 = self.sc.parallelize(tuples, 2)

    def seqOp(x, y):
        x.append(y)
        return x

    def comboOp(x, y):
        x.extend(y)
        return x

    values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
    values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
    # Sort lists to ensure clean comparison with ground_truth
    values1.sort()
    values2.sort()

    ground_truth = [(i, [1]*2) for i in range(10)]
    self.assertEqual(values1, ground_truth)
    self.assertEqual(values2, ground_truth)

def test_fold_mutable_zero_value(self):
    # Test for SPARK-9021; uses fold to merge an RDD of dict counters into
    # a single dict
    # NOTE: dict is used instead of collections.Counter for Python 2.6
    # compatibility
    from collections import defaultdict

    counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
    counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
    counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
    counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
    all_counts = [counts1, counts2, counts3, counts4]
    # Show that single or multiple partitions work
    data1 = self.sc.parallelize(all_counts, 1)
    data2 = self.sc.parallelize(all_counts, 2)

    def comboOp(x, y):
        for key, val in y.items():
            x[key] += val
        return x

    fold1 = data1.fold(defaultdict(int), comboOp)
    fold2 = data2.fold(defaultdict(int), comboOp)

    # Ground truth: element-wise sum of all four counters.
    ground_truth = defaultdict(int)
    for counts in all_counts:
        for key, val in counts.items():
            ground_truth[key] += val
    self.assertEqual(fold1, ground_truth)
    self.assertEqual(fold2, ground_truth)

def test_fold_by_key_mutable_zero_value(self):
    # Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
    # lists of all values for each key in the original RDD
    tuples = [(i, range(i)) for i in range(10)]*2
    # Show that single or multiple partitions work
    data1 = self.sc.parallelize(tuples, 1)
    data2 = self.sc.parallelize(tuples, 2)

    def comboOp(x, y):
        x.extend(y)
        return x

    values1 = data1.foldByKey([], comboOp).collect()
    values2 = data2.foldByKey([], comboOp).collect()
    # Sort lists to ensure clean comparison with ground_truth
    values1.sort()
    values2.sort()

    # list(range(...)) for Python 3.x compatibility
    ground_truth = [(i, list(range(i))*2) for i in range(10)]
    self.assertEqual(values1, ground_truth)
    self.assertEqual(values2, ground_truth)

def test_aggregate_by_key(self):
    # aggregateByKey with a mutable ``set`` zero value: all values are
    # collected into per-key sets.
    data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)

    def seqOp(x, y):
        x.add(y)
        return x

    def combOp(x, y):
        x |= y
        return x

    sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
    self.assertEqual(3, len(sets))
    self.assertEqual(set([1]), sets[1])
    self.assertEqual(set([2]), sets[3])
    self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
    """operator.itemgetter callables must be usable as map functions."""
    rdd = self.sc.parallelize([range(10)])
    from operator import itemgetter
    self.assertEqual([1], rdd.map(itemgetter(1)).collect())
    self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())

def test_namedtuple_in_rdd(self):
    """namedtuple elements must survive the serialization round trip."""
    from collections import namedtuple
    Person = namedtuple("Person", "id firstName lastName")
    jon = Person(1, "Jon", "Doe")
    jane = Person(2, "Jane", "Doe")
    theDoes = self.sc.parallelize([jon, jane])
    self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
    """A multi-megabyte broadcast value must be fully readable in tasks."""
    N = 10000
    data = [[float(i) for i in range(300)] for i in range(N)]
    bdata = self.sc.broadcast(data)  # 27MB
    m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
    self.assertEqual(N, m)
def test_unpersist(self):
    """unpersist() must keep a broadcast usable (it only drops cached
    copies), while destroy() must make any further use fail."""
    N = 1000
    data = [[float(i) for i in range(300)] for i in range(N)]
    bdata = self.sc.broadcast(data)  # 3MB
    bdata.unpersist()
    # Still usable after unpersist: the value is re-fetched on demand.
    m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
    self.assertEqual(N, m)
    bdata.destroy()
    try:
        self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
    except Exception:
        # Expected: the broadcast is gone.  (The original bound the
        # exception to an unused name ``e``; the binding is dropped.)
        pass
    else:
        raise Exception("job should fail after destroy the broadcast")
def test_multiple_broadcasts(self):
    """Large and repeated broadcasts must arrive intact in tasks
    (verified via md5 checksums computed on both sides)."""
    N = 1 << 21
    b1 = self.sc.broadcast(set(range(N)))  # multiple blocks in JVM
    r = list(range(1 << 15))
    random.shuffle(r)
    s = str(r).encode()
    checksum = hashlib.md5(s).hexdigest()
    b2 = self.sc.broadcast(s)
    # Every task must see the same (size, checksum) pair, so the set of
    # collected results collapses to one element.
    r = list(set(self.sc.parallelize(range(10), 10).map(
        lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
    self.assertEqual(1, len(r))
    size, csum = r[0]
    self.assertEqual(N, size)
    self.assertEqual(checksum, csum)

    # Repeat with a second broadcast of fresh data to check that a new
    # broadcast after an existing one also works.
    random.shuffle(r)
    s = str(r).encode()
    checksum = hashlib.md5(s).hexdigest()
    b2 = self.sc.broadcast(s)
    r = list(set(self.sc.parallelize(range(10), 10).map(
        lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
    self.assertEqual(1, len(r))
    size, csum = r[0]
    self.assertEqual(N, size)
    self.assertEqual(checksum, csum)
def test_multithread_broadcast_pickle(self):
    """Broadcasts pickled from different threads must each be tracked in
    that thread's own _pickled_broadcast_vars (thread-local storage)."""
    import threading

    b1 = self.sc.broadcast(list(range(3)))
    b2 = self.sc.broadcast(list(range(3)))

    def f1():
        return b1.value

    def f2():
        return b2.value

    funcs_num_pickled = {f1: None, f2: None}

    def do_pickle(f, sc):
        # Mimic how a task command is serialized for shipping to workers.
        command = (f, None, sc.serializer, sc.serializer)
        ser = CloudPickleSerializer()
        ser.dumps(command)

    def process_vars(sc):
        # Count, then clear, the broadcasts recorded for this thread.
        broadcast_vars = list(sc._pickled_broadcast_vars)
        num_pickled = len(broadcast_vars)
        sc._pickled_broadcast_vars.clear()
        return num_pickled

    def run(f, sc):
        do_pickle(f, sc)
        funcs_num_pickled[f] = process_vars(sc)

    # pickle f1, adds b1 to sc._pickled_broadcast_vars in main thread local storage
    do_pickle(f1, self.sc)

    # run all for f2, should only add/count/clear b2 from worker thread local storage
    t = threading.Thread(target=run, args=(f2, self.sc))
    t.start()
    t.join()

    # count number of vars pickled in main thread, only b1 should be counted and cleared
    funcs_num_pickled[f1] = process_vars(self.sc)

    self.assertEqual(funcs_num_pickled[f1], 1)
    self.assertEqual(funcs_num_pickled[f2], 1)
    self.assertEqual(len(list(self.sc._pickled_broadcast_vars)), 0)
def test_large_closure(self):
    """A task closure capturing ~1.6MB of data must ship correctly."""
    N = 200000
    data = [float(i) for i in xrange(N)]
    rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
    self.assertEqual(N, rdd.first())
    # regression test for SPARK-6886
    self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
    """zip() must work across RDDs using different serializers."""
    a = self.sc.parallelize(range(5))
    b = self.sc.parallelize(range(100, 105))
    self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
    a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
    b = b._reserialize(MarshalSerializer())
    self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
    # regression test for SPARK-4841
    path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
    t = self.sc.textFile(path)
    cnt = t.count()
    self.assertEqual(cnt, t.zip(t).count())
    rdd = t.map(str)
    self.assertEqual(cnt, t.zip(rdd).count())
    # regression test for bug in _reserializer(): zipping the same pair
    # twice must still work.
    self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
    # Regression test for SPARK-5973: zipping elements of very different
    # serialized sizes (batching differs between the two sides).
    a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
    b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
    self.assertEqual(10000, a.zip(b).count())

def test_zip_with_different_number_of_items(self):
    """zip() must fail when the two RDDs do not pair up one-to-one."""
    a = self.sc.parallelize(range(5), 2)
    # different number of partitions
    b = self.sc.parallelize(range(100, 106), 3)
    self.assertRaises(ValueError, lambda: a.zip(b))
    with QuietTest(self.sc):
        # different number of batched items in JVM
        b = self.sc.parallelize(range(100, 104), 2)
        self.assertRaises(Exception, lambda: a.zip(b).count())
        # different number of items in one pair
        b = self.sc.parallelize(range(100, 106), 2)
        self.assertRaises(Exception, lambda: a.zip(b).count())
        # same total number of items, but different distributions
        a = self.sc.parallelize([2, 3], 2).flatMap(range)
        b = self.sc.parallelize([3, 2], 2).flatMap(range)
        self.assertEqual(a.count(), b.count())
        self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
    """countApproxDistinct must be close to the true cardinality for
    several element types, and reject an out-of-range relative accuracy."""
    rdd = self.sc.parallelize(xrange(1000))
    self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
    self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
    self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
    self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)

    # Only 20 distinct values across 1000 elements / 7 partitions.
    rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
    self.assertTrue(18 < rdd.countApproxDistinct() < 22)
    self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
    self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
    self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)

    # Relative accuracy below the supported minimum must raise.
    self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
    """Exercise RDD.histogram() with explicit and computed buckets.

    Covers empty RDDs, out-of-range values, exact bucket matches,
    None/NaN filtering, infinite bucket edges, invalid bucket specs,
    automatically computed buckets, and string-valued RDDs.
    """
    # empty
    rdd = self.sc.parallelize([])
    self.assertEqual([0], rdd.histogram([0, 10])[1])
    self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
    self.assertRaises(ValueError, lambda: rdd.histogram(1))
    # out of range
    rdd = self.sc.parallelize([10.01, -0.01])
    self.assertEqual([0], rdd.histogram([0, 10])[1])
    self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
    # in range with one bucket
    rdd = self.sc.parallelize(range(1, 5))
    self.assertEqual([4], rdd.histogram([0, 10])[1])
    self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
    # in range with one bucket exact match
    self.assertEqual([4], rdd.histogram([1, 4])[1])
    # out of range with two buckets
    rdd = self.sc.parallelize([10.01, -0.01])
    self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
    # out of range with two uneven buckets
    rdd = self.sc.parallelize([10.01, -0.01])
    self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
    # in range with two buckets
    rdd = self.sc.parallelize([1, 2, 3, 5, 6])
    self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
    # in range with two bucket and None
    rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
    self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
    # in range with two uneven buckets
    rdd = self.sc.parallelize([1, 2, 3, 5, 6])
    self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
    # mixed range with two uneven buckets
    rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
    self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
    # mixed range with four uneven buckets
    rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
    self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
    # mixed range with uneven buckets and NaN
    rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
                               199.0, 200.0, 200.1, None, float('nan')])
    self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
    # out of range with infinite buckets
    rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
    self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
    # invalid buckets
    self.assertRaises(ValueError, lambda: rdd.histogram([]))
    self.assertRaises(ValueError, lambda: rdd.histogram([1]))
    self.assertRaises(ValueError, lambda: rdd.histogram(0))
    self.assertRaises(TypeError, lambda: rdd.histogram({}))
    # without buckets
    rdd = self.sc.parallelize(range(1, 5))
    self.assertEqual(([1, 4], [4]), rdd.histogram(1))
    # without buckets single element
    rdd = self.sc.parallelize([1])
    self.assertEqual(([1, 1], [1]), rdd.histogram(1))
    # without bucket no range
    rdd = self.sc.parallelize([1] * 4)
    self.assertEqual(([1, 1], [4]), rdd.histogram(1))
    # without buckets basic two
    rdd = self.sc.parallelize(range(1, 5))
    self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
    # without buckets with more requested than elements
    rdd = self.sc.parallelize([1, 2])
    buckets = [1 + 0.2 * i for i in range(6)]
    hist = [1, 0, 0, 0, 1]
    self.assertEqual((buckets, hist), rdd.histogram(5))
    # invalid RDDs
    rdd = self.sc.parallelize([1, float('inf')])
    self.assertRaises(ValueError, lambda: rdd.histogram(2))
    rdd = self.sc.parallelize([float('nan')])
    self.assertRaises(ValueError, lambda: rdd.histogram(2))
    # string
    rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
    self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
    self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
    self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions_asc(self):
    """Keys routed by parity come out ascending inside each partition."""
    pairs = [(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)]
    rdd = self.sc.parallelize(pairs, 2)
    result = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, True)
    evens, odds = result.glom().collect()
    self.assertEqual(evens, [(0, 5), (0, 8), (2, 6)])
    self.assertEqual(odds, [(1, 3), (3, 8), (3, 8)])
def test_repartitionAndSortWithinPartitions_desc(self):
    """Keys routed by parity come out descending inside each partition."""
    pairs = [(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)]
    rdd = self.sc.parallelize(pairs, 2)
    result = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, False)
    evens, odds = result.glom().collect()
    self.assertEqual(evens, [(2, 6), (0, 5), (0, 8)])
    self.assertEqual(odds, [(3, 8), (3, 8), (1, 3)])
def test_repartition_no_skewed(self):
    """Neither repartition() nor coalesce(shuffle=True) leaves empty partitions."""
    num_partitions = 20
    rdd = self.sc.parallelize(range(int(1000)), 2)
    for reshaped in (rdd.repartition(num_partitions),
                     rdd.coalesce(num_partitions, True)):
        sizes = reshaped.glom().map(len).collect()
        self.assertTrue(sizes.count(0) == 0)
def test_repartition_on_textfile(self):
    """Collapsing a text-file RDD to one partition keeps its content intact."""
    hello_path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
    collected = self.sc.textFile(hello_path).repartition(1).collect()
    self.assertEqual(u"Hello World!", collected[0])
def test_distinct(self):
    """distinct() deduplicates and honours an explicit partition count."""
    rdd = self.sc.parallelize((1, 2, 3) * 10, 10)
    self.assertEqual(10, rdd.getNumPartitions())
    self.assertEqual(3, rdd.distinct().count())
    narrowed = rdd.distinct(5)
    self.assertEqual(5, narrowed.getNumPartitions())
    self.assertEqual(3, narrowed.count())
def test_external_group_by_key(self):
    """groupByKey spills to disk when worker memory is tightly constrained."""
    # 1m of worker memory forces the external (disk-backed) aggregation path
    self.sc._conf.set("spark.python.worker.memory", "1m")
    N = 200001
    kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
    gkv = kv.groupByKey().cache()
    self.assertEqual(3, gkv.count())
    filtered = gkv.filter(lambda kv: kv[0] == 1)
    self.assertEqual(1, filtered.count())
    self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
    # the grouped values iterate consistently whether or not materialised
    self.assertEqual([(N // 3, N // 3)],
                     filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
    result = filtered.collect()[0][1]
    self.assertEqual(N // 3, len(result))
    # spilled groups must be backed by shuffle.ExternalListOfList
    self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
    """Sorting an empty pair RDD yields an empty result rather than failing."""
    empty_pairs = self.sc.parallelize(zip([], []))
    self.assertEqual([], empty_pairs.sortByKey().collect())
def test_sample(self):
    """sample() is reproducible per seed and varies across seeds."""
    rdd = self.sc.parallelize(range(0, 100), 4)
    # identical seed => identical sample, without and with replacement
    self.assertSetEqual(set(rdd.sample(False, 0.1, 2).collect()),
                        set(rdd.sample(False, 0.1, 2).collect()))
    self.assertSetEqual(set(rdd.sample(True, 0.2, 5).collect()),
                        set(rdd.sample(True, 0.2, 5).collect()))
    # different seeds => different samples (overwhelmingly likely)
    self.assertNotEqual(set(rdd.sample(False, 0.3, 10).collect()),
                        set(rdd.sample(False, 0.3, 20).collect()))
    self.assertNotEqual(set(rdd.sample(True, 0.4, 11).collect()),
                        set(rdd.sample(True, 0.4, 21).collect()))
def test_null_in_rdd(self):
    """JVM nulls deserialize to Python None under both serializers."""
    # a Java RDD with an embedded null, built by a JVM-side test helper
    jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
    rdd = RDD(jrdd, self.sc, UTF8Deserializer())
    self.assertEqual([u"a", None, u"b"], rdd.collect())
    # same data through the no-op serializer: raw bytes instead of text
    rdd = RDD(jrdd, self.sc, NoOpSerializer())
    self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
    """Round-trip an RDD Python -> Java -> Python twice without data loss."""
    # Regression test for SPARK-5361
    data = [
        (u'1', {u'director': u'<NAME>'}),
        (u'2', {u'director': u'<NAME>'})
    ]
    data_rdd = self.sc.parallelize(data)
    data_java_rdd = data_rdd._to_java_object_rdd()
    data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
    converted_rdd = RDD(data_python_rdd, self.sc)
    self.assertEqual(2, converted_rdd.count())
    # conversion between python and java RDD threw exceptions
    # (the second round-trip is the part that used to fail)
    data_java_rdd = converted_rdd._to_java_object_rdd()
    data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
    converted_rdd = RDD(data_python_rdd, self.sc)
    self.assertEqual(2, converted_rdd.count())
def test_narrow_dependency_in_join(self):
    """join/cogroup on co-partitioned RDDs must not add an extra shuffle stage.

    Each job is isolated in its own job group so the status tracker can
    count its stages: 2 stages when both sides share a partitioner
    (narrow dependency), 3 when one side still needs a shuffle.
    """
    rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
    parted = rdd.partitionBy(2)
    # union of co-partitioned RDDs keeps the shared partition count
    self.assertEqual(2, parted.union(parted).getNumPartitions())
    self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
    self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
    tracker = self.sc.statusTracker()
    # join of two co-partitioned RDDs: 2 stages
    self.sc.setJobGroup("test1", "test", True)
    d = sorted(parted.join(parted).collect())
    self.assertEqual(10, len(d))
    self.assertEqual((0, (0, 0)), d[0])
    jobId = tracker.getJobIdsForGroup("test1")[0]
    self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
    # join with an unpartitioned RDD: 3 stages (one extra shuffle)
    self.sc.setJobGroup("test2", "test", True)
    d = sorted(parted.join(rdd).collect())
    self.assertEqual(10, len(d))
    self.assertEqual((0, (0, 0)), d[0])
    jobId = tracker.getJobIdsForGroup("test2")[0]
    self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
    # same expectations for cogroup
    self.sc.setJobGroup("test3", "test", True)
    d = sorted(parted.cogroup(parted).collect())
    self.assertEqual(10, len(d))
    self.assertEqual([[0], [0]], list(map(list, d[0][1])))
    jobId = tracker.getJobIdsForGroup("test3")[0]
    self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
    self.sc.setJobGroup("test4", "test", True)
    d = sorted(parted.cogroup(rdd).collect())
    self.assertEqual(10, len(d))
    self.assertEqual([[0], [0]], list(map(list, d[0][1])))
    jobId = tracker.getJobIdsForGroup("test4")[0]
    self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
    """Taking the first element of a large Java-side RDD must not blow up."""
    big = self.sc.parallelize(xrange(1 << 20)).map(str)
    big._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
    """Regression test for SPARK-5969: sortByKey fills every output partition."""
    unsorted = [(i * 59 % 101, i) for i in range(101)]  # unsorted sequence
    rdd = self.sc.parallelize(unsorted)
    for ascending in (True, False):
        result = rdd.sortByKey(ascending=ascending, numPartitions=5)
        self.assertEqual(sorted(unsorted, reverse=not ascending),
                         result.collect())
        for partition_size in result.glom().map(len).collect():
            self.assertGreater(partition_size, 0)
def test_pipe_functions(self):
    """pipe(): failures are silent unless checkCode=True; output round-trips."""
    data = ['1', '2', '3']
    rdd = self.sc.parallelize(data)
    with QuietTest(self.sc):
        # a failing command yields an empty result by default...
        self.assertEqual([], rdd.pipe('cc').collect())
        # ...but raises once checkCode=True propagates the exit status
        self.assertRaises(Py4JJavaError, rdd.pipe('cc', checkCode=True).collect)
    # cat echoes the input back unchanged (ordering aside)
    result = rdd.pipe('cat').collect()
    result.sort()
    for x, y in zip(data, result):
        self.assertEqual(x, y)
    # grep exits non-zero on no match: error with checkCode, empty without
    self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
    self.assertEqual([], rdd.pipe('grep 4').collect())
def test_pipe_unicode(self):
    """Regression test for SPARK-20947: pipe() round-trips non-ASCII text."""
    data = [u'\u6d4b\u8bd5', '1']
    piped = self.sc.parallelize(data).pipe('cat').collect()
    self.assertEqual(data, piped)
def test_stopiteration_in_user_code(self):
    """A StopIteration escaping user code must fail the task, not truncate output."""
    def stopit(*x):
        raise StopIteration()
    seq_rdd = self.sc.parallelize(range(10))
    keyed_rdd = self.sc.parallelize((x % 2, x) for x in range(10))
    msg = "Caught StopIteration thrown from user's code; failing the task"
    # executor-side invocations: always surfaced as Py4JJavaError
    self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.map(stopit).collect)
    self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.filter(stopit).collect)
    self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
    self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.reduce, stopit)
    self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.fold, 0, stopit)
    self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
    self.assertRaisesRegexp(Py4JJavaError, msg,
                            seq_rdd.cartesian(seq_rdd).flatMap(stopit).collect)
    # these methods call the user function both in the driver and in the executor
    # the exception raised is different according to where the StopIteration happens
    # RuntimeError is raised if in the driver
    # Py4JJavaError is raised if in the executor (wraps the RuntimeError raised in the worker)
    self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
                            keyed_rdd.reduceByKeyLocally, stopit)
    self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
                            seq_rdd.aggregate, 0, stopit, lambda *x: 1)
    self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
                            seq_rdd.aggregate, 0, lambda *x: 1, stopit)
class ProfilerTests(PySparkTestCase):
    """Tests for the Python worker profiler enabled via spark.python.profile."""

    def setUp(self):
        self._old_sys_path = list(sys.path)
        class_name = self.__class__.__name__
        conf = SparkConf().set("spark.python.profile", "true")
        self.sc = SparkContext('local[4]', class_name, conf=conf)

    def test_profiler(self):
        """A profiled job records stats; show_profiles/dump_profiles expose them."""
        self.do_computation()
        profilers = self.sc.profiler_collector.profilers
        self.assertEqual(1, len(profilers))
        # renamed from `id` to avoid shadowing the builtin
        profile_id, profiler, _ = profilers[0]
        stats = profiler.stats()
        self.assertTrue(stats is not None)
        width, stat_list = stats.get_print_list([])
        func_names = [func_name for fname, n, func_name in stat_list]
        self.assertTrue("heavy_foo" in func_names)
        # show_profiles() must mention the hot function on stdout
        old_stdout = sys.stdout
        sys.stdout = io = StringIO()
        self.sc.show_profiles()
        self.assertTrue("heavy_foo" in io.getvalue())
        sys.stdout = old_stdout
        # dump_profiles() writes one pstats file per profiled RDD id
        d = tempfile.gettempdir()
        self.sc.dump_profiles(d)
        self.assertTrue("rdd_%d.pstats" % profile_id in os.listdir(d))

    def test_custom_profiler(self):
        """profiler_cls can be swapped for a user-supplied Profiler subclass."""
        class TestCustomProfiler(BasicProfiler):
            def show(self, id):
                self.result = "Custom formatting"

        self.sc.profiler_collector.profiler_cls = TestCustomProfiler
        self.do_computation()
        profilers = self.sc.profiler_collector.profilers
        self.assertEqual(1, len(profilers))
        _, profiler, _ = profilers[0]
        self.assertTrue(isinstance(profiler, TestCustomProfiler))
        self.sc.show_profiles()
        self.assertEqual("Custom formatting", profiler.result)

    def do_computation(self):
        """Run a deliberately CPU-heavy job so the profiler has data to record."""
        def heavy_foo(x):
            for i in range(1 << 18):
                x = 1
        rdd = self.sc.parallelize(range(100))
        rdd.foreach(heavy_foo)
class ProfilerTests2(unittest.TestCase):
    """Profiler API behaviour when profiling is not enabled."""

    def test_profiler_disabled(self):
        sc = SparkContext(conf=SparkConf().set("spark.python.profile", "false"))
        try:
            # both entry points must refuse to run without the config flag
            for action in (lambda: sc.show_profiles(),
                           lambda: sc.dump_profiles("/tmp/abc")):
                self.assertRaisesRegexp(
                    RuntimeError,
                    "'spark.python.profile' configuration must be set",
                    action)
        finally:
            sc.stop()
class InputFormatTests(ReusedPySparkTestCase):
    """Reading Hadoop SequenceFiles and InputFormats into Python RDDs."""

    @classmethod
    def setUpClass(cls):
        ReusedPySparkTestCase.setUpClass()
        # generate the SequenceFile fixtures in a fresh temp directory
        cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(cls.tempdir.name)
        cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)

    @classmethod
    def tearDownClass(cls):
        ReusedPySparkTestCase.tearDownClass()
        shutil.rmtree(cls.tempdir.name)

    @unittest.skipIf(sys.version >= "3", "serialize array of byte")
    def test_sequencefiles(self):
        """sequenceFile() deserializes each Writable key/value type correctly."""
        basepath = self.tempdir.name
        ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
                                           "org.apache.hadoop.io.IntWritable",
                                           "org.apache.hadoop.io.Text").collect())
        ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
        self.assertEqual(ints, ei)
        doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
                                              "org.apache.hadoop.io.DoubleWritable",
                                              "org.apache.hadoop.io.Text").collect())
        ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
        self.assertEqual(doubles, ed)
        bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
                                            "org.apache.hadoop.io.IntWritable",
                                            "org.apache.hadoop.io.BytesWritable").collect())
        ebs = [(1, bytearray('aa', 'utf-8')),
               (1, bytearray('aa', 'utf-8')),
               (2, bytearray('aa', 'utf-8')),
               (2, bytearray('bb', 'utf-8')),
               (2, bytearray('bb', 'utf-8')),
               (3, bytearray('cc', 'utf-8'))]
        self.assertEqual(bytes, ebs)
        text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
                                           "org.apache.hadoop.io.Text",
                                           "org.apache.hadoop.io.Text").collect())
        et = [(u'1', u'aa'),
              (u'1', u'aa'),
              (u'2', u'aa'),
              (u'2', u'bb'),
              (u'2', u'bb'),
              (u'3', u'cc')]
        self.assertEqual(text, et)
        bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
                                            "org.apache.hadoop.io.IntWritable",
                                            "org.apache.hadoop.io.BooleanWritable").collect())
        eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
        self.assertEqual(bools, eb)
        nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
                                            "org.apache.hadoop.io.IntWritable",
                                            "org.apache.hadoop.io.BooleanWritable").collect())
        en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
        self.assertEqual(nulls, en)
        maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
                                    "org.apache.hadoop.io.IntWritable",
                                    "org.apache.hadoop.io.MapWritable").collect()
        em = [(1, {}),
              (1, {3.0: u'bb'}),
              (2, {1.0: u'aa'}),
              (2, {1.0: u'cc'}),
              (3, {2.0: u'dd'})]
        for v in maps:
            self.assertTrue(v in em)
        # arrays get pickled to tuples by default
        tuples = sorted(self.sc.sequenceFile(
            basepath + "/sftestdata/sfarray/",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable").collect())
        et = [(1, ()),
              (2, (3.0, 4.0, 5.0)),
              (3, (4.0, 5.0, 6.0))]
        self.assertEqual(tuples, et)
        # with custom converters, primitive arrays can stay as arrays
        arrays = sorted(self.sc.sequenceFile(
            basepath + "/sftestdata/sfarray/",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable",
            valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
        ea = [(1, array('d')),
              (2, array('d', [3.0, 4.0, 5.0])),
              (3, array('d', [4.0, 5.0, 6.0]))]
        self.assertEqual(arrays, ea)
        # custom Writable classes come back as dicts tagged with __class__
        clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
                                            "org.apache.hadoop.io.Text",
                                            "org.apache.spark.api.python.TestWritable").collect())
        cname = u'org.apache.spark.api.python.TestWritable'
        ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
              (u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
              (u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
              (u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
              (u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
        self.assertEqual(clazz, ec)
        unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
                                                      "org.apache.hadoop.io.Text",
                                                      "org.apache.spark.api.python.TestWritable",
                                                      ).collect())
        self.assertEqual(unbatched_clazz, ec)

    def test_oldhadoop(self):
        """hadoopFile()/hadoopRDD() read data through the old mapred API."""
        basepath = self.tempdir.name
        ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
                                         "org.apache.hadoop.mapred.SequenceFileInputFormat",
                                         "org.apache.hadoop.io.IntWritable",
                                         "org.apache.hadoop.io.Text").collect())
        ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
        self.assertEqual(ints, ei)
        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
        oldconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
        hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
                                  "org.apache.hadoop.io.LongWritable",
                                  "org.apache.hadoop.io.Text",
                                  conf=oldconf).collect()
        result = [(0, u'Hello World!')]
        self.assertEqual(hello, result)

    def test_newhadoop(self):
        """newAPIHadoopFile()/newAPIHadoopRDD() read via the mapreduce API."""
        basepath = self.tempdir.name
        ints = sorted(self.sc.newAPIHadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text").collect())
        ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
        self.assertEqual(ints, ei)
        hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
        newconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
        hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
                                        "org.apache.hadoop.io.LongWritable",
                                        "org.apache.hadoop.io.Text",
                                        conf=newconf).collect()
        result = [(0, u'Hello World!')]
        self.assertEqual(hello, result)

    def test_newolderror(self):
        """Mixing old-API readers with new-API formats (and vice versa) fails."""
        basepath = self.tempdir.name
        self.assertRaises(Exception, lambda: self.sc.hadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))
        self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapred.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))

    def test_bad_inputs(self):
        """Nonexistent Writable or InputFormat class names must raise."""
        basepath = self.tempdir.name
        self.assertRaises(Exception, lambda: self.sc.sequenceFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.io.NotValidWritable",
            "org.apache.hadoop.io.Text"))
        self.assertRaises(Exception, lambda: self.sc.hadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapred.NotValidInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))
        self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
            basepath + "/sftestdata/sfint/",
            "org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text"))

    def test_converters(self):
        # use of custom converters
        basepath = self.tempdir.name
        maps = sorted(self.sc.sequenceFile(
            basepath + "/sftestdata/sfmap/",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.MapWritable",
            keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
            valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
        em = [(u'\x01', []),
              (u'\x01', [3.0]),
              (u'\x02', [1.0]),
              (u'\x02', [1.0]),
              (u'\x03', [2.0])]
        self.assertEqual(maps, em)

    def test_binary_files(self):
        """binaryFiles() yields (path, whole-file-bytes) pairs."""
        path = os.path.join(self.tempdir.name, "binaryfiles")
        os.mkdir(path)
        data = b"short binary data"
        with open(os.path.join(path, "part-0000"), 'wb') as f:
            f.write(data)
        [(p, d)] = self.sc.binaryFiles(path).collect()
        self.assertTrue(p.endswith("part-0000"))
        self.assertEqual(d, data)

    def test_binary_records(self):
        """binaryRecords() splits a file into fixed-width records."""
        path = os.path.join(self.tempdir.name, "binaryrecords")
        os.mkdir(path)
        with open(os.path.join(path, "part-0000"), 'w') as f:
            for i in range(100):
                # each record is exactly 4 characters wide
                f.write('%04d' % i)
        result = self.sc.binaryRecords(path, 4).map(int).collect()
        self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
    """Writing Python RDDs out through Hadoop OutputFormats and reading them back."""

    def setUp(self):
        # fresh temp directory per test; the file itself is removed so
        # save operations can create the directory tree
        self.tempdir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(self.tempdir.name)

    def tearDown(self):
        shutil.rmtree(self.tempdir.name, ignore_errors=True)

    @unittest.skipIf(sys.version >= "3", "serialize array of byte")
    def test_sequencefiles(self):
        """saveAsSequenceFile() round-trips each supported key/value type."""
        basepath = self.tempdir.name
        ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
        self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
        ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
        self.assertEqual(ints, ei)
        ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
        self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
        doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
        self.assertEqual(doubles, ed)
        ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
        self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
        bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
        self.assertEqual(bytes, ebs)
        et = [(u'1', u'aa'),
              (u'2', u'bb'),
              (u'3', u'cc')]
        self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
        text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
        self.assertEqual(text, et)
        eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
        self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
        bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
        self.assertEqual(bools, eb)
        en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
        self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
        nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
        self.assertEqual(nulls, en)
        em = [(1, {}),
              (1, {3.0: u'bb'}),
              (2, {1.0: u'aa'}),
              (2, {1.0: u'cc'}),
              (3, {2.0: u'dd'})]
        self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
        maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
        for v in maps:
            self.assertTrue(v, em)

    def test_oldhadoop(self):
        """saveAsHadoopFile/saveAsHadoopDataset round-trip via the mapred API."""
        basepath = self.tempdir.name
        dict_data = [(1, {}),
                     (1, {"row1": 1.0}),
                     (2, {"row2": 2.0})]
        self.sc.parallelize(dict_data).saveAsHadoopFile(
            basepath + "/oldhadoop/",
            "org.apache.hadoop.mapred.SequenceFileOutputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.MapWritable")
        result = self.sc.hadoopFile(
            basepath + "/oldhadoop/",
            "org.apache.hadoop.mapred.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.MapWritable").collect()
        for v in result:
            self.assertTrue(v, dict_data)
        # same round-trip, but driven entirely by a job conf dictionary
        conf = {
            "mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
            "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
            "mapreduce.job.output.value.class": "org.apache.hadoop.io.MapWritable",
            "mapreduce.output.fileoutputformat.outputdir": basepath + "/olddataset/"
        }
        self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
        input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/olddataset/"}
        result = self.sc.hadoopRDD(
            "org.apache.hadoop.mapred.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.MapWritable",
            conf=input_conf).collect()
        for v in result:
            self.assertTrue(v, dict_data)

    def test_newhadoop(self):
        """saveAsNewAPIHadoopFile/Dataset round-trip via the mapreduce API."""
        basepath = self.tempdir.name
        data = [(1, ""),
                (1, "a"),
                (2, "bcdf")]
        self.sc.parallelize(data).saveAsNewAPIHadoopFile(
            basepath + "/newhadoop/",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text")
        result = sorted(self.sc.newAPIHadoopFile(
            basepath + "/newhadoop/",
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text").collect())
        self.assertEqual(result, data)
        # same round-trip, driven entirely by a job conf dictionary
        conf = {
            "mapreduce.job.outputformat.class":
                "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
            "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
            "mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
            "mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
        }
        self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
        input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
        new_dataset = sorted(self.sc.newAPIHadoopRDD(
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.hadoop.io.Text",
            conf=input_conf).collect())
        self.assertEqual(new_dataset, data)

    @unittest.skipIf(sys.version >= "3", "serialize of array")
    def test_newhadoop_with_array(self):
        """Array values survive the round-trip when paired with converters."""
        basepath = self.tempdir.name
        # use custom ArrayWritable types and converters to handle arrays
        array_data = [(1, array('d')),
                      (1, array('d', [1.0, 2.0, 3.0])),
                      (2, array('d', [3.0, 4.0, 5.0]))]
        self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
            basepath + "/newhadoop/",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable",
            valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
        result = sorted(self.sc.newAPIHadoopFile(
            basepath + "/newhadoop/",
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable",
            valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
        self.assertEqual(result, array_data)
        # same round-trip via a conf-driven dataset save
        conf = {
            "mapreduce.job.outputformat.class":
                "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
            "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
            "mapreduce.job.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
            "mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
        }
        self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
            conf,
            valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
        input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
        new_dataset = sorted(self.sc.newAPIHadoopRDD(
            "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
            "org.apache.hadoop.io.IntWritable",
            "org.apache.spark.api.python.DoubleArrayWritable",
            valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
            conf=input_conf).collect())
        self.assertEqual(new_dataset, array_data)

    def test_newolderror(self):
        """Saving with the wrong-generation OutputFormat class must fail."""
        basepath = self.tempdir.name
        rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
        self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
            basepath + "/newolderror/saveAsHadoopFile/",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
        self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
            basepath + "/newolderror/saveAsNewAPIHadoopFile/",
            "org.apache.hadoop.mapred.SequenceFileOutputFormat"))

    def test_bad_inputs(self):
        """Nonexistent OutputFormat class names must raise."""
        basepath = self.tempdir.name
        rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
        self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
            basepath + "/badinputs/saveAsHadoopFile/",
            "org.apache.hadoop.mapred.NotValidOutputFormat"))
        self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
            basepath + "/badinputs/saveAsNewAPIHadoopFile/",
            "org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))

    def test_converters(self):
        # use of custom converters
        basepath = self.tempdir.name
        data = [(1, {3.0: u'bb'}),
                (2, {1.0: u'aa'}),
                (3, {2.0: u'dd'})]
        self.sc.parallelize(data).saveAsNewAPIHadoopFile(
            basepath + "/converters/",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
            keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
            valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
        converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
        expected = [(u'1', 3.0),
                    (u'2', 1.0),
                    (u'3', 2.0)]
        self.assertEqual(converted, expected)

    def test_reserialization(self):
        """The same zipped RDD is writable through all five save code paths."""
        basepath = self.tempdir.name
        x = range(1, 5)
        y = range(1001, 1005)
        data = list(zip(x, y))
        rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
        rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
        result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
        self.assertEqual(result1, data)
        rdd.saveAsHadoopFile(
            basepath + "/reserialize/hadoop",
            "org.apache.hadoop.mapred.SequenceFileOutputFormat")
        result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
        self.assertEqual(result2, data)
        rdd.saveAsNewAPIHadoopFile(
            basepath + "/reserialize/newhadoop",
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
        result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
        self.assertEqual(result3, data)
        conf4 = {
            "mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
            "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
            "mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
            "mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/dataset"}
        rdd.saveAsHadoopDataset(conf4)
        result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
        self.assertEqual(result4, data)
        conf5 = {"mapreduce.job.outputformat.class":
                 "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
                 "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
                 "mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
                 "mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/newdataset"
                 }
        rdd.saveAsNewAPIHadoopDataset(conf5)
        result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
        self.assertEqual(result5, data)

    def test_malformed_RDD(self):
        basepath = self.tempdir.name
        # non-batch-serialized RDD[[(K, V)]] should be rejected
        data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
        rdd = self.sc.parallelize(data, len(data))
        self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
            basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
    """Lifecycle tests for the PySpark worker daemon process."""

    def connect(self, port):
        """Open a TCP connection to the daemon and ask it to shut a worker down."""
        from socket import socket, AF_INET, SOCK_STREAM
        sock = socket(AF_INET, SOCK_STREAM)
        sock.connect(('127.0.0.1', port))
        # send a split index of -1 to shutdown the worker
        sock.send(b"\xFF\xFF\xFF\xFF")
        sock.close()
        return True

    def do_termination_test(self, terminator):
        """Start a daemon, kill it via `terminator`, and verify it stopped serving."""
        from subprocess import Popen, PIPE
        from errno import ECONNREFUSED
        # start daemon
        daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
        python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
        daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
        # read the port number
        port = read_int(daemon.stdout)
        # daemon should accept connections
        self.assertTrue(self.connect(port))
        # request shutdown
        terminator(daemon)
        time.sleep(1)
        # daemon should no longer accept connections
        try:
            self.connect(port)
        except EnvironmentError as exception:
            self.assertEqual(exception.errno, ECONNREFUSED)
        else:
            self.fail("Expected EnvironmentError to be raised")

    def test_termination_stdin(self):
        """Ensure that daemon and workers terminate when stdin is closed."""
        self.do_termination_test(lambda daemon: daemon.stdin.close())

    def test_termination_sigterm(self):
        """Ensure that daemon and workers terminate on SIGTERM."""
        from signal import SIGTERM
        self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(ReusedPySparkTestCase):
    """Tests of the Python worker lifecycle: cancellation, error recovery,
    and worker reuse across jobs."""

    def test_cancel_task(self):
        """Cancelling all jobs should kill the worker but leave the daemon up."""
        temp = tempfile.NamedTemporaryFile(delete=True)
        temp.close()
        path = temp.name

        def sleep(x):
            import os
            import time
            # record (daemon_pid, worker_pid) so the driver can probe them
            with open(path, 'w') as f:
                f.write("%d %d" % (os.getppid(), os.getpid()))
            time.sleep(100)

        # start job in background thread
        def run():
            try:
                self.sc.parallelize(range(1), 1).foreach(sleep)
            except Exception:
                pass
        import threading
        t = threading.Thread(target=run)
        t.daemon = True
        t.start()

        daemon_pid, worker_pid = 0, 0
        while True:
            if os.path.exists(path):
                with open(path) as f:
                    data = f.read().split(' ')
                daemon_pid, worker_pid = map(int, data)
                break
            time.sleep(0.1)

        # cancel jobs
        self.sc.cancelAllJobs()
        t.join()

        # poll with signal 0 (existence check) for up to ~5 seconds
        for _ in range(50):
            try:
                os.kill(worker_pid, 0)
                time.sleep(0.1)
            except OSError:
                break  # worker was killed
        else:
            self.fail("worker has not been killed after 5 seconds")

        try:
            os.kill(daemon_pid, 0)
        except OSError:
            self.fail("daemon had been killed")

        # run a normal job
        rdd = self.sc.parallelize(xrange(100), 1)
        self.assertEqual(100, rdd.map(str).count())

    def test_after_exception(self):
        """A failing task must not poison the worker for later jobs."""
        def raise_exception(_):
            raise Exception()
        rdd = self.sc.parallelize(xrange(100), 1)
        with QuietTest(self.sc):
            self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
        self.assertEqual(100, rdd.map(str).count())

    def test_after_jvm_exception(self):
        """A JVM-side failure must not poison the worker for later jobs."""
        tempFile = tempfile.NamedTemporaryFile(delete=False)
        tempFile.write(b"Hello World!")
        tempFile.close()
        data = self.sc.textFile(tempFile.name, 1)
        filtered_data = data.filter(lambda x: True)
        self.assertEqual(1, filtered_data.count())
        # deleting the file makes the next count fail inside the JVM
        os.unlink(tempFile.name)
        with QuietTest(self.sc):
            self.assertRaises(Exception, lambda: filtered_data.count())

        rdd = self.sc.parallelize(xrange(100), 1)
        self.assertEqual(100, rdd.map(str).count())

    def test_accumulator_when_reuse_worker(self):
        """Accumulators must stay correct when workers are reused."""
        from pyspark.accumulators import INT_ACCUMULATOR_PARAM
        acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
        self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
        self.assertEqual(sum(range(100)), acc1.value)

        acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
        self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
        self.assertEqual(sum(range(100)), acc2.value)
        self.assertEqual(sum(range(100)), acc1.value)

    def test_reuse_worker_after_take(self):
        """A worker interrupted by take() should be reusable afterwards."""
        rdd = self.sc.parallelize(xrange(100000), 1)
        self.assertEqual(0, rdd.first())

        def count():
            try:
                rdd.count()
            except Exception:
                pass

        t = threading.Thread(target=count)
        t.daemon = True
        t.start()
        t.join(5)
        # FIX: Thread.isAlive() is a deprecated alias removed in Python 3.9;
        # is_alive() is available on every supported version.
        self.assertFalse(t.is_alive())
        self.assertEqual(100000, rdd.count())

    def test_with_different_versions_of_python(self):
        """A driver/worker Python version mismatch must raise a JVM error."""
        rdd = self.sc.parallelize(range(10))
        rdd.count()
        version = self.sc.pythonVer
        self.sc.pythonVer = "2.0"
        try:
            with QuietTest(self.sc):
                self.assertRaises(Py4JJavaError, lambda: rdd.count())
        finally:
            self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
tmp_dir = tempfile.gettempdir()
self.sparkSubmit = [
os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit"),
"--conf", "spark.driver.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
"--conf", "spark.executor.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
]
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def create_spark_package(self, artifact_name):
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen(self.sparkSubmit + [script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen(self.sparkSubmit + [script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen(self.sparkSubmit + ["--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen(self.sparkSubmit + ["--py-files", zip, "--master",
"local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
"""Submit and test a script with a dependency on a Spark Package"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen(
self.sparkSubmit + ["--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency_on_cluster(self):
"""Submit and test a script with a dependency on a Spark Package on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen(
self.sparkSubmit + ["--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, "--master", "local-cluster[1,1,1024]",
script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
self.sparkSubmit + ["--master", "local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_user_configuration(self):
"""Make sure user configuration is respected (SPARK-19307)"""
script = self.createTempFile("test.py", """
|from pyspark import SparkConf, SparkContext
|
|conf = SparkConf().set("spark.test_config", "1")
|sc = SparkContext(conf = conf)
|try:
| if sc._conf.get("spark.test_config") != "1":
| raise Exception("Cannot find spark.test_config in SparkContext's conf.")
|finally:
| sc.stop()
""")
proc = subprocess.Popen(
self.sparkSubmit + ["--master", "local", script],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode, msg="Process failed with error:\n {0}".format(out))
class ContextTests(unittest.TestCase):
    """Tests of SparkContext creation, teardown and status reporting."""

    def test_failed_sparkcontext_creation(self):
        # Regression test for SPARK-1550
        self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))

    def test_get_or_create(self):
        """getOrCreate() must hand back the already-active context."""
        with SparkContext.getOrCreate() as sc:
            self.assertTrue(SparkContext.getOrCreate() is sc)

    def test_parallelize_eager_cleanup(self):
        """parallelize() should not leave temp files behind."""
        with SparkContext() as sc:
            temp_files = os.listdir(sc._temp_dir)
            rdd = sc.parallelize([0, 1, 2])
            post_parallalize_temp_files = os.listdir(sc._temp_dir)
            self.assertEqual(temp_files, post_parallalize_temp_files)

    def test_set_conf(self):
        # This is for an internal use case. When there is an existing SparkContext,
        # SparkSession's builder needs to set configs into SparkContext's conf.
        sc = SparkContext()
        sc._conf.set("spark.test.SPARK16224", "SPARK16224")
        self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
        sc.stop()

    def test_stop(self):
        sc = SparkContext()
        self.assertNotEqual(SparkContext._active_spark_context, None)
        sc.stop()
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with(self):
        """Leaving the ``with`` block must stop the context."""
        with SparkContext() as sc:
            self.assertNotEqual(SparkContext._active_spark_context, None)
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with_exception(self):
        """The context must be stopped even when the ``with`` body raises."""
        try:
            with SparkContext() as sc:
                self.assertNotEqual(SparkContext._active_spark_context, None)
                raise Exception()
        # FIX: was a bare ``except:`` which would also swallow SystemExit and
        # KeyboardInterrupt; only Exception() is ever raised here.
        except Exception:
            pass
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_with_stop(self):
        """An explicit stop() inside the ``with`` block must be safe."""
        with SparkContext() as sc:
            self.assertNotEqual(SparkContext._active_spark_context, None)
            sc.stop()
        self.assertEqual(SparkContext._active_spark_context, None)

    def test_progress_api(self):
        """The status tracker must report a running, then failed, job."""
        with SparkContext() as sc:
            sc.setJobGroup('test_progress_api', '', True)
            rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))

            def run():
                try:
                    rdd.count()
                except Exception:
                    pass
            t = threading.Thread(target=run)
            t.daemon = True
            t.start()
            # wait for scheduler to start
            time.sleep(1)

            tracker = sc.statusTracker()
            jobIds = tracker.getJobIdsForGroup('test_progress_api')
            self.assertEqual(1, len(jobIds))
            job = tracker.getJobInfo(jobIds[0])
            self.assertEqual(1, len(job.stageIds))
            stage = tracker.getStageInfo(job.stageIds[0])
            self.assertEqual(rdd.getNumPartitions(), stage.numTasks)

            sc.cancelAllJobs()
            t.join()
            # wait for event listener to update the status
            time.sleep(1)
            job = tracker.getJobInfo(jobIds[0])
            self.assertEqual('FAILED', job.status)
            self.assertEqual([], tracker.getActiveJobsIds())
            self.assertEqual([], tracker.getActiveStageIds())

            sc.stop()

    def test_startTime(self):
        with SparkContext() as sc:
            self.assertGreater(sc.startTime, 0)
class ConfTests(unittest.TestCase):
    """Tests for Python-side Spark configuration handling."""

    def test_memory_conf(self):
        # A sort should succeed under each worker memory limit.
        for memory_limit in ("1T", "1G", "1M", "1024K"):
            sc = SparkContext(conf=SparkConf().set("spark.python.worker.memory", memory_limit))
            values = list(range(1024))
            random.shuffle(values)
            rdd = sc.parallelize(values, 4)
            self.assertEqual(sorted(values), rdd.sortBy(lambda x: x).collect())
            sc.stop()
class KeywordOnlyTests(unittest.TestCase):
    """Tests for the @keyword_only decorator's _input_kwargs mechanism."""

    class Wrapped(object):
        @keyword_only
        def set(self, x=None, y=None):
            kwargs = self._input_kwargs
            if "x" in kwargs:
                self._x = kwargs["x"]
            if "y" in kwargs:
                self._y = kwargs["y"]
            return x, y

    def test_keywords(self):
        # Only the keyword actually passed should be recorded on the instance.
        wrapped = self.Wrapped()
        x, y = wrapped.set(y=1)
        self.assertEqual(y, 1)
        self.assertEqual(y, wrapped._y)
        self.assertIsNone(x)
        self.assertFalse(hasattr(wrapped, "_x"))

    def test_non_keywords(self):
        # Positional arguments are rejected by the decorator.
        wrapped = self.Wrapped()
        self.assertRaises(TypeError, lambda: wrapped.set(0, y=1))

    def test_kwarg_ownership(self):
        # test _input_kwargs is owned by each class instance and not a shared static variable
        class Setter(object):
            @keyword_only
            def set(self, x=None, other=None, other_x=None):
                if "other" in self._input_kwargs:
                    self._input_kwargs["other"].set(x=self._input_kwargs["other_x"])
                self._x = self._input_kwargs["x"]

        first, second = Setter(), Setter()
        first.set(x=1, other=second, other_x=2)
        self.assertEqual(first._x, 1)
        self.assertEqual(second._x, 2)
class UtilTests(PySparkTestCase):
    """Tests for helpers in pyspark.util."""

    def test_py4j_exception_message(self):
        from pyspark.util import _exception_message
        with self.assertRaises(Py4JJavaError) as context:
            # This attempts java.lang.String(null) which throws an NPE.
            self.sc._jvm.java.lang.String(None)
        self.assertIn('NullPointerException', _exception_message(context.exception))

    def test_parsing_version_string(self):
        from pyspark.util import VersionUtils
        self.assertRaises(ValueError, lambda: VersionUtils.majorMinorVersion("abced"))
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):

    """General PySpark tests that depend on scipy """

    def test_serialize(self):
        # A SciPy ufunc must survive serialization to the workers.
        from scipy.special import gammaln
        values = range(1, 5)
        expected = [gammaln(v) for v in values]
        observed = self.sc.parallelize(values).map(gammaln).collect()
        self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):

    """General PySpark tests that depend on numpy """

    def test_statcounter_array(self):
        """StatCounter must aggregate element-wise over numpy arrays."""
        x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
        s = x.stats()
        self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
        self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
        self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
        self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())

        # population statistics (sample=False, the default)
        stats_dict = s.asDict()
        self.assertEqual(3, stats_dict['count'])
        self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
        self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
        self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
        self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
        self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
        self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())

        # sample statistics
        stats_sample_dict = s.asDict(sample=True)
        # FIX: this assertion previously re-checked stats_dict['count']
        # (copy-paste); it must verify the sample dict.
        self.assertEqual(3, stats_sample_dict['count'])
        self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
        self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
        self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
        self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
        self.assertSequenceEqual(
            [0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
        self.assertSequenceEqual(
            [0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
if __name__ == "__main__":
    from pyspark.tests import *
    # Prefer the XML runner (for CI report collection) when it is available.
    if xmlrunner:
        unittest.main(
            testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'),
            verbosity=2)
    else:
        unittest.main(verbosity=2)
| [
"os.mkdir",
"os.unlink",
"pyspark.context.SparkContext.getOrCreate",
"pyspark.util.VersionUtils.majorMinorVersion",
"random.shuffle",
"os.getppid",
"socket.socket",
"pyspark.conf.SparkConf",
"collections.defaultdict",
"pyspark.context.SparkContext",
"pyspark.serializers.NoOpSerializer",
"glob.... | [((99178, 99233), 'unittest2.skipIf', 'unittest.skipIf', (['(not _have_scipy)', '"""SciPy not installed"""'], {}), "(not _have_scipy, 'SciPy not installed')\n", (99193, 99233), True, 'import unittest2 as unittest\n'), ((99575, 99630), 'unittest2.skipIf', 'unittest.skipIf', (['(not _have_numpy)', '"""NumPy not installed"""'], {}), "(not _have_numpy, 'NumPy not installed')\n", (99590, 99630), True, 'import unittest2 as unittest\n'), ((56635, 56697), 'unittest2.skipIf', 'unittest.skipIf', (["(sys.version >= '3')", '"""serialize array of byte"""'], {}), "(sys.version >= '3', 'serialize array of byte')\n", (56650, 56697), True, 'import unittest2 as unittest\n'), ((66961, 67023), 'unittest2.skipIf', 'unittest.skipIf', (["(sys.version >= '3')", '"""serialize array of byte"""'], {}), "(sys.version >= '3', 'serialize array of byte')\n", (66976, 67023), True, 'import unittest2 as unittest\n'), ((72282, 72339), 'unittest2.skipIf', 'unittest.skipIf', (["(sys.version >= '3')", '"""serialize of array"""'], {}), "(sys.version >= '3', 'serialize of array')\n", (72297, 72339), True, 'import unittest2 as unittest\n'), ((3010, 3040), 'pyspark.shuffle.ExternalMerger', 'ExternalMerger', (['self.agg', '(1000)'], {}), '(self.agg, 1000)\n', (3024, 3040), False, 'from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter\n'), ((3231, 3261), 'pyspark.shuffle.ExternalMerger', 'ExternalMerger', (['self.agg', '(1000)'], {}), '(self.agg, 1000)\n', (3245, 3261), False, 'from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter\n'), ((3530, 3558), 'pyspark.shuffle.ExternalMerger', 'ExternalMerger', (['self.agg', '(20)'], {}), '(self.agg, 20)\n', (3544, 3558), False, 'from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter\n'), ((3750, 3778), 'pyspark.shuffle.ExternalMerger', 'ExternalMerger', (['self.agg', '(10)'], {}), '(self.agg, 10)\n', (3764, 3778), False, 'from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter\n'), ((4054, 
4095), 'pyspark.shuffle.ExternalMerger', 'ExternalMerger', (['self.agg', '(5)'], {'partitions': '(3)'}), '(self.agg, 5, partitions=3)\n', (4068, 4095), False, 'from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter\n'), ((5073, 5091), 'pyspark.serializers.PickleSerializer', 'PickleSerializer', ([], {}), '()\n', (5089, 5091), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((6490, 6507), 'random.shuffle', 'random.shuffle', (['l'], {}), '(l)\n', (6504, 6507), False, 'import random\n'), ((6525, 6545), 'pyspark.shuffle.ExternalSorter', 'ExternalSorter', (['(1024)'], {}), '(1024)\n', (6539, 6545), False, 'from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter\n'), ((7136, 7153), 'random.shuffle', 'random.shuffle', (['l'], {}), '(l)\n', (7150, 7153), False, 'import random\n'), ((8059, 8082), 'pyspark.context.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (8071, 8082), False, 'from pyspark.context import SparkContext\n'), ((8122, 8139), 'random.shuffle', 'random.shuffle', (['l'], {}), '(l)\n', (8136, 8139), False, 'import random\n'), ((8441, 8463), 'collections.namedtuple', 'namedtuple', (['"""P"""', '"""x y"""'], {}), "('P', 'x y')\n", (8451, 8463), False, 'from collections import namedtuple\n'), ((8768, 8791), 'pyspark.serializers.CloudPickleSerializer', 'CloudPickleSerializer', ([], {}), '()\n', (8789, 8791), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((8831, 8844), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), 
'(1)\n', (8841, 8844), False, 'from operator import itemgetter\n'), ((8958, 8974), 'operator.itemgetter', 'itemgetter', (['(0)', '(3)'], {}), '(0, 3)\n', (8968, 8974), False, 'from operator import itemgetter\n'), ((9126, 9149), 'pyspark.serializers.CloudPickleSerializer', 'CloudPickleSerializer', ([], {}), '()\n', (9147, 9149), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((9366, 9389), 'pyspark.serializers.CloudPickleSerializer', 'CloudPickleSerializer', ([], {}), '()\n', (9387, 9389), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((9518, 9533), 'operator.attrgetter', 'attrgetter', (['"""a"""'], {}), "('a')\n", (9528, 9533), False, 'from operator import attrgetter\n'), ((9646, 9666), 'operator.attrgetter', 'attrgetter', (['"""a"""', '"""b"""'], {}), "('a', 'b')\n", (9656, 9666), False, 'from operator import attrgetter\n'), ((9798, 9815), 'operator.attrgetter', 'attrgetter', (['"""e.a"""'], {}), "('e.a')\n", (9808, 9815), False, 'from operator import attrgetter\n'), ((9928, 9952), 'operator.attrgetter', 'attrgetter', (['"""e.a"""', '"""e.b"""'], {}), "('e.a', 'e.b')\n", (9938, 9952), False, 'from operator import attrgetter\n'), ((10574, 10597), 'pyspark.serializers.CloudPickleSerializer', 'CloudPickleSerializer', ([], {}), '()\n', (10595, 10597), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, 
AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11026, 11036), 'io.BytesIO', 'StringIO', ([], {}), '()\n', (11034, 11036), True, 'from io import BytesIO as StringIO\n'), ((12510, 12546), 'pyspark.context.SparkContext', 'SparkContext', (['"""local[4]"""', 'class_name'], {}), "('local[4]', class_name)\n", (12522, 12546), False, 'from pyspark.context import SparkContext\n'), ((12743, 12781), 'pyspark.context.SparkContext', 'SparkContext', (['"""local[4]"""', 'cls.__name__'], {}), "('local[4]', cls.__name__)\n", (12755, 12781), False, 'from pyspark.context import SparkContext\n'), ((12949, 12990), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (12976, 12990), False, 'import tempfile\n'), ((12999, 13033), 'os.unlink', 'os.unlink', (['self.checkpointDir.name'], {}), '(self.checkpointDir.name)\n', (13008, 13033), False, 'import os\n'), ((13125, 13163), 'shutil.rmtree', 'shutil.rmtree', (['self.checkpointDir.name'], {}), '(self.checkpointDir.name)\n', (13138, 13163), False, 'import shutil\n'), ((13546, 13559), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (13556, 13559), False, 'import time\n'), ((14240, 14253), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (14250, 14253), False, 'import time\n'), ((14997, 15010), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (15007, 15010), False, 'import time\n'), ((15773, 15835), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/userlibrary.py"""'], {}), "(SPARK_HOME, 'python/test_support/userlibrary.py')\n", (15785, 15835), False, 'import os\n'), ((16029, 16092), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/hello/hello.txt"""'], {}), "(SPARK_HOME, 'python/test_support/hello/hello.txt')\n", (16041, 16092), False, 'import os\n'), ((16147, 16174), 'pyspark.files.SparkFiles.get', 'SparkFiles.get', (['"""hello.txt"""'], {}), "('hello.txt')\n", (16161, 16174), False, 'from pyspark.files 
import SparkFiles\n'), ((16405, 16458), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/hello"""'], {}), "(SPARK_HOME, 'python/test_support/hello')\n", (16417, 16458), False, 'import os\n'), ((16519, 16542), 'pyspark.files.SparkFiles.get', 'SparkFiles.get', (['"""hello"""'], {}), "('hello')\n", (16533, 16542), False, 'from pyspark.files import SparkFiles\n'), ((17191, 17253), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/userlibrary.py"""'], {}), "(SPARK_HOME, 'python/test_support/userlibrary.py')\n", (17203, 17253), False, 'import os\n'), ((17706, 17769), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/userlib-0.1.zip"""'], {}), "(SPARK_HOME, 'python/test_support/userlib-0.1.zip')\n", (17718, 17769), False, 'import os\n'), ((18606, 18645), 'pyspark.context.SparkContext', 'SparkContext', (['"""local[4, 2]"""', 'class_name'], {}), "('local[4, 2]', class_name)\n", (18618, 18645), False, 'from pyspark.context import SparkContext\n'), ((21281, 21298), 'pyspark.taskcontext.TaskContext.get', 'TaskContext.get', ([], {}), '()\n', (21296, 21298), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((24040, 24048), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (24045, 24048), False, 'from time import sleep\n'), ((24180, 24188), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (24185, 24188), False, 'from time import sleep\n'), ((24422, 24462), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(True)'}), '(delete=True)\n', (24449, 24462), False, 'import tempfile\n'), ((24885, 24925), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(True)'}), '(delete=True)\n', (24912, 24925), False, 'import tempfile\n'), ((25636, 25676), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(True)'}), '(delete=True)\n', (25663, 25676), False, 'import tempfile\n'), ((25940, 26003), 'os.path.join', 'os.path.join', 
(['SPARK_HOME', '"""python/test_support/hello/hello.txt"""'], {}), "(SPARK_HOME, 'python/test_support/hello/hello.txt')\n", (25952, 26003), False, 'import os\n'), ((27360, 27401), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (27387, 27401), False, 'import tempfile\n'), ((27625, 27649), 'os.unlink', 'os.unlink', (['tempFile.name'], {}), '(tempFile.name)\n', (27634, 27649), False, 'import os\n'), ((31406, 31422), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (31417, 31422), False, 'from collections import defaultdict\n'), ((33406, 33451), 'collections.namedtuple', 'namedtuple', (['"""Person"""', '"""id firstName lastName"""'], {}), "('Person', 'id firstName lastName')\n", (33416, 33451), False, 'from collections import namedtuple\n'), ((34653, 34670), 'random.shuffle', 'random.shuffle', (['r'], {}), '(r)\n', (34667, 34670), False, 'import random\n'), ((35072, 35089), 'random.shuffle', 'random.shuffle', (['r'], {}), '(r)\n', (35086, 35089), False, 'import random\n'), ((36508, 36556), 'threading.Thread', 'threading.Thread', ([], {'target': 'run', 'args': '(f2, self.sc)'}), '(target=run, args=(f2, self.sc))\n', (36524, 36556), False, 'import threading\n'), ((37754, 37817), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/hello/hello.txt"""'], {}), "(SPARK_HOME, 'python/test_support/hello/hello.txt')\n", (37766, 37817), False, 'import os\n'), ((45639, 45702), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/hello/hello.txt"""'], {}), "(SPARK_HOME, 'python/test_support/hello/hello.txt')\n", (45651, 45702), False, 'import os\n'), ((48484, 48513), 'pyspark.rdd.RDD', 'RDD', (['data_python_rdd', 'self.sc'], {}), '(data_python_rdd, self.sc)\n', (48487, 48513), False, 'from pyspark.rdd import RDD\n'), ((48793, 48822), 'pyspark.rdd.RDD', 'RDD', (['data_python_rdd', 'self.sc'], {}), '(data_python_rdd, self.sc)\n', (48796, 48822), False, 'from 
pyspark.rdd import RDD\n'), ((54000, 54047), 'pyspark.context.SparkContext', 'SparkContext', (['"""local[4]"""', 'class_name'], {'conf': 'conf'}), "('local[4]', class_name, conf=conf)\n", (54012, 54047), False, 'from pyspark.context import SparkContext\n'), ((54557, 54567), 'io.BytesIO', 'StringIO', ([], {}), '()\n', (54565, 54567), True, 'from io import BytesIO as StringIO\n'), ((54699, 54720), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (54718, 54720), False, 'import tempfile\n'), ((56321, 56362), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (56348, 56362), False, 'import tempfile\n'), ((56371, 56398), 'os.unlink', 'os.unlink', (['cls.tempdir.name'], {}), '(cls.tempdir.name)\n', (56380, 56398), False, 'import os\n'), ((56597, 56628), 'shutil.rmtree', 'shutil.rmtree', (['cls.tempdir.name'], {}), '(cls.tempdir.name)\n', (56610, 56628), False, 'import shutil\n'), ((62363, 62426), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/hello/hello.txt"""'], {}), "(SPARK_HOME, 'python/test_support/hello/hello.txt')\n", (62375, 62426), False, 'import os\n'), ((63331, 63394), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/hello/hello.txt"""'], {}), "(SPARK_HOME, 'python/test_support/hello/hello.txt')\n", (63343, 63394), False, 'import os\n'), ((65990, 66036), 'os.path.join', 'os.path.join', (['self.tempdir.name', '"""binaryfiles"""'], {}), "(self.tempdir.name, 'binaryfiles')\n", (66002, 66036), False, 'import os\n'), ((66045, 66059), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (66053, 66059), False, 'import os\n'), ((66374, 66422), 'os.path.join', 'os.path.join', (['self.tempdir.name', '"""binaryrecords"""'], {}), "(self.tempdir.name, 'binaryrecords')\n", (66386, 66422), False, 'import os\n'), ((66431, 66445), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (66439, 66445), False, 'import os\n'), ((66790, 66831), 
'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (66817, 66831), False, 'import tempfile\n'), ((66840, 66868), 'os.unlink', 'os.unlink', (['self.tempdir.name'], {}), '(self.tempdir.name)\n', (66849, 66868), False, 'import os\n'), ((66902, 66954), 'shutil.rmtree', 'shutil.rmtree', (['self.tempdir.name'], {'ignore_errors': '(True)'}), '(self.tempdir.name, ignore_errors=True)\n', (66915, 66954), False, 'import shutil\n'), ((79077, 79105), 'socket.socket', 'socket', (['AF_INET', 'SOCK_STREAM'], {}), '(AF_INET, SOCK_STREAM)\n', (79083, 79105), False, 'from socket import socket, AF_INET, SOCK_STREAM\n'), ((79605, 79663), 'subprocess.Popen', 'Popen', (['[python_exec, daemon_path]'], {'stdin': 'PIPE', 'stdout': 'PIPE'}), '([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)\n', (79610, 79663), False, 'from subprocess import Popen, PIPE\n'), ((79711, 79734), 'pyspark.serializers.read_int', 'read_int', (['daemon.stdout'], {}), '(daemon.stdout)\n', (79719, 79734), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((79886, 79899), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (79896, 79899), False, 'import time\n'), ((80680, 80720), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(True)'}), '(delete=True)\n', (80707, 80720), False, 'import tempfile\n'), ((81196, 81224), 'threading.Thread', 'threading.Thread', ([], {'target': 'run'}), '(target=run)\n', (81212, 81224), False, 'import threading\n'), ((82520, 82561), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (82547, 82561), False, 'import tempfile\n'), ((82788, 82812), 'os.unlink', 'os.unlink', (['tempFile.name'], {}), 
'(tempFile.name)\n', (82797, 82812), False, 'import os\n'), ((83842, 83872), 'threading.Thread', 'threading.Thread', ([], {'target': 'count'}), '(target=count)\n', (83858, 83872), False, 'import threading\n'), ((84480, 84498), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (84496, 84498), False, 'import tempfile\n'), ((84517, 84538), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (84536, 84538), False, 'import tempfile\n'), ((84876, 84906), 'shutil.rmtree', 'shutil.rmtree', (['self.programDir'], {}), '(self.programDir)\n', (84889, 84906), False, 'import shutil\n'), ((85162, 85196), 're.compile', 're.compile', (['"""^ *\\\\|"""', 're.MULTILINE'], {}), "('^ *\\\\|', re.MULTILINE)\n", (85172, 85196), False, 'import re\n'), ((85843, 85877), 're.compile', 're.compile', (['"""^ *\\\\|"""', 're.MULTILINE'], {}), "('^ *\\\\|', re.MULTILINE)\n", (85853, 85877), False, 'import re\n'), ((86116, 86142), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path', '"""w"""'], {}), "(path, 'w')\n", (86131, 86142), False, 'import zipfile\n'), ((87641, 87710), 'subprocess.Popen', 'subprocess.Popen', (['(self.sparkSubmit + [script])'], {'stdout': 'subprocess.PIPE'}), '(self.sparkSubmit + [script], stdout=subprocess.PIPE)\n', (87657, 87710), False, 'import subprocess\n'), ((88287, 88356), 'subprocess.Popen', 'subprocess.Popen', (['(self.sparkSubmit + [script])'], {'stdout': 'subprocess.PIPE'}), '(self.sparkSubmit + [script], stdout=subprocess.PIPE)\n', (88303, 88356), False, 'import subprocess\n'), ((89019, 89112), 'subprocess.Popen', 'subprocess.Popen', (["(self.sparkSubmit + ['--py-files', zip, script])"], {'stdout': 'subprocess.PIPE'}), "(self.sparkSubmit + ['--py-files', zip, script], stdout=\n subprocess.PIPE)\n", (89035, 89112), False, 'import subprocess\n'), ((89826, 89957), 'subprocess.Popen', 'subprocess.Popen', (["(self.sparkSubmit + ['--py-files', zip, '--master',\n 'local-cluster[1,1,1024]', script])"], {'stdout': 'subprocess.PIPE'}), "(self.sparkSubmit + 
['--py-files', zip, '--master',\n 'local-cluster[1,1,1024]', script], stdout=subprocess.PIPE)\n", (89842, 89957), False, 'import subprocess\n'), ((90605, 90757), 'subprocess.Popen', 'subprocess.Popen', (["(self.sparkSubmit + ['--packages', 'a:mylib:0.1', '--repositories', 'file:' +\n self.programDir, script])"], {'stdout': 'subprocess.PIPE'}), "(self.sparkSubmit + ['--packages', 'a:mylib:0.1',\n '--repositories', 'file:' + self.programDir, script], stdout=subprocess\n .PIPE)\n", (90621, 90757), False, 'import subprocess\n'), ((91417, 91607), 'subprocess.Popen', 'subprocess.Popen', (["(self.sparkSubmit + ['--packages', 'a:mylib:0.1', '--repositories', 'file:' +\n self.programDir, '--master', 'local-cluster[1,1,1024]', script])"], {'stdout': 'subprocess.PIPE'}), "(self.sparkSubmit + ['--packages', 'a:mylib:0.1',\n '--repositories', 'file:' + self.programDir, '--master',\n 'local-cluster[1,1,1024]', script], stdout=subprocess.PIPE)\n", (91433, 91607), False, 'import subprocess\n'), ((92351, 92463), 'subprocess.Popen', 'subprocess.Popen', (["(self.sparkSubmit + ['--master', 'local-cluster[1,1,1024]', script])"], {'stdout': 'subprocess.PIPE'}), "(self.sparkSubmit + ['--master', 'local-cluster[1,1,1024]',\n script], stdout=subprocess.PIPE)\n", (92367, 92463), False, 'import subprocess\n'), ((93218, 93339), 'subprocess.Popen', 'subprocess.Popen', (["(self.sparkSubmit + ['--master', 'local', script])"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), "(self.sparkSubmit + ['--master', 'local', script], stdout=\n subprocess.PIPE, stderr=subprocess.STDOUT)\n", (93234, 93339), False, 'import subprocess\n'), ((94388, 94402), 'pyspark.context.SparkContext', 'SparkContext', ([], {}), '()\n', (94400, 94402), False, 'from pyspark.context import SparkContext\n'), ((94609, 94623), 'pyspark.context.SparkContext', 'SparkContext', ([], {}), '()\n', (94621, 94623), False, 'from pyspark.context import SparkContext\n'), ((101541, 101567), 'unittest2.main', 'unittest.main', 
([], {'verbosity': '(2)'}), '(verbosity=2)\n', (101554, 101567), True, 'import unittest2 as unittest\n'), ((1306, 1385), 'sys.stderr.write', 'sys.stderr.write', (['"""Please install unittest2 to test with Python 2.6 or earlier"""'], {}), "('Please install unittest2 to test with Python 2.6 or earlier')\n", (1322, 1385), False, 'import sys\n'), ((1394, 1405), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1402, 1405), False, 'import sys\n'), ((5713, 5773), 'pyspark.shuffle.Aggregator', 'Aggregator', (['stopit', 'legit_merge_value', 'legit_merge_combiners'], {}), '(stopit, legit_merge_value, legit_merge_combiners)\n', (5723, 5773), False, 'from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter\n'), ((5936, 6000), 'pyspark.shuffle.Aggregator', 'Aggregator', (['legit_create_combiner', 'stopit', 'legit_merge_combiners'], {}), '(legit_create_combiner, stopit, legit_merge_combiners)\n', (5946, 6000), False, 'from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter\n'), ((6167, 6227), 'pyspark.shuffle.Aggregator', 'Aggregator', (['legit_create_combiner', 'legit_merge_value', 'stopit'], {}), '(legit_create_combiner, legit_merge_value, stopit)\n', (6177, 6227), False, 'from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter\n'), ((8504, 8516), 'pyspark.cloudpickle.dumps', 'dumps', (['p1', '(2)'], {}), '(p1, 2)\n', (8509, 8516), False, 'from pyspark.cloudpickle import dumps\n'), ((8617, 8625), 'pyspark.cloudpickle.dumps', 'dumps', (['P'], {}), '(P)\n', (8622, 8625), False, 'from pyspark.cloudpickle import dumps\n'), ((10215, 10238), 'pyspark.serializers.CloudPickleSerializer', 'CloudPickleSerializer', ([], {}), '()\n', (10236, 10238), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), 
((10692, 10703), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (10700, 10703), False, 'import sys\n'), ((10863, 10881), 'pyspark.serializers.PickleSerializer', 'PickleSerializer', ([], {}), '()\n', (10879, 10881), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11418, 11434), 'pyspark.serializers.NoOpSerializer', 'NoOpSerializer', ([], {}), '()\n', (11432, 11434), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11449, 11467), 'pyspark.serializers.UTF8Deserializer', 'UTF8Deserializer', ([], {}), '()\n', (11465, 11467), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11482, 11500), 'pyspark.serializers.PickleSerializer', 'PickleSerializer', ([], {}), '()\n', (11498, 11500), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11515, 11534), 'pyspark.serializers.MarshalSerializer', 'MarshalSerializer', ([], {}), '()\n', (11532, 11534), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, 
UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11549, 11565), 'pyspark.serializers.AutoSerializer', 'AutoSerializer', ([], {}), '()\n', (11563, 11565), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((17995, 18062), 'os.path.join', 'os.path.join', (['SPARK_HOME', '"""python/test_support/SimpleHTTPServer.py"""'], {}), "(SPARK_HOME, 'python/test_support/SimpleHTTPServer.py')\n", (18007, 18062), False, 'import os\n'), ((20184, 20201), 'pyspark.taskcontext.TaskContext.get', 'TaskContext.get', ([], {}), '()\n', (20199, 20201), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((22296, 22320), 'pyspark.taskcontext.BarrierTaskContext.get', 'BarrierTaskContext.get', ([], {}), '()\n', (22318, 22320), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((22411, 22422), 'time.time', 'time.time', ([], {}), '()\n', (22420, 22422), False, 'import time\n'), ((28673, 28689), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (28684, 28689), False, 'from collections import defaultdict\n'), ((28741, 28757), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (28752, 28757), False, 'from collections import defaultdict\n'), ((28813, 28829), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (28824, 28829), False, 'from collections import defaultdict\n'), ((28888, 28904), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (28899, 28904), False, 'from collections import defaultdict\n'), ((31301, 31317), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (31312, 31317), False, 'from collections import 
defaultdict\n'), ((31355, 31371), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (31366, 31371), False, 'from collections import defaultdict\n'), ((35907, 35930), 'pyspark.serializers.CloudPickleSerializer', 'CloudPickleSerializer', ([], {}), '()\n', (35928, 35930), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((37580, 37599), 'pyspark.serializers.MarshalSerializer', 'MarshalSerializer', ([], {}), '()\n', (37597, 37599), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((47875, 47893), 'pyspark.serializers.UTF8Deserializer', 'UTF8Deserializer', ([], {}), '()\n', (47891, 47893), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((47988, 48004), 'pyspark.serializers.NoOpSerializer', 'NoOpSerializer', ([], {}), '()\n', (48002, 48004), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((79475, 79500), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (79490, 79500), False, 'import os\n'), ((79555, 79587), 'os.environ.get', 'os.environ.get', 
(['"""PYSPARK_PYTHON"""'], {}), "('PYSPARK_PYTHON')\n", (79569, 79587), False, 'import os\n'), ((80950, 80965), 'time.sleep', 'time.sleep', (['(100)'], {}), '(100)\n', (80960, 80965), False, 'import time\n'), ((81341, 81361), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (81355, 81361), False, 'import os\n'), ((81538, 81553), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (81548, 81553), False, 'import time\n'), ((81922, 81944), 'os.kill', 'os.kill', (['daemon_pid', '(0)'], {}), '(daemon_pid, 0)\n', (81929, 81944), False, 'import os\n'), ((85295, 85330), 'os.path.join', 'os.path.join', (['self.programDir', 'name'], {}), '(self.programDir, name)\n', (85307, 85330), False, 'import os\n'), ((85424, 85464), 'os.path.join', 'os.path.join', (['self.programDir', 'dir', 'name'], {}), '(self.programDir, dir, name)\n', (85436, 85464), False, 'import os\n'), ((85976, 86017), 'os.path.join', 'os.path.join', (['self.programDir', '(name + ext)'], {}), '(self.programDir, name + ext)\n', (85988, 86017), False, 'import os\n'), ((86051, 86101), 'os.path.join', 'os.path.join', (['self.programDir', 'dir', '(zip_name + ext)'], {}), '(self.programDir, dir, zip_name + ext)\n', (86063, 86101), False, 'import os\n'), ((87007, 87051), 'os.path.join', 'os.path.join', (['group_id', 'artifact_id', 'version'], {}), '(group_id, artifact_id, version)\n', (87019, 87051), False, 'import os\n'), ((87192, 87236), 'os.path.join', 'os.path.join', (['group_id', 'artifact_id', 'version'], {}), '(group_id, artifact_id, version)\n', (87204, 87236), False, 'import os\n'), ((93772, 93798), 'pyspark.context.SparkContext.getOrCreate', 'SparkContext.getOrCreate', ([], {}), '()\n', (93796, 93798), False, 'from pyspark.context import SparkContext\n'), ((93928, 93942), 'pyspark.context.SparkContext', 'SparkContext', ([], {}), '()\n', (93940, 93942), False, 'from pyspark.context import SparkContext\n'), ((93975, 93999), 'os.listdir', 'os.listdir', (['sc._temp_dir'], {}), 
'(sc._temp_dir)\n', (93985, 93999), False, 'import os\n'), ((94086, 94110), 'os.listdir', 'os.listdir', (['sc._temp_dir'], {}), '(sc._temp_dir)\n', (94096, 94110), False, 'import os\n'), ((94818, 94832), 'pyspark.context.SparkContext', 'SparkContext', ([], {}), '()\n', (94830, 94832), False, 'from pyspark.context import SparkContext\n'), ((95325, 95339), 'pyspark.context.SparkContext', 'SparkContext', ([], {}), '()\n', (95337, 95339), False, 'from pyspark.context import SparkContext\n'), ((95557, 95571), 'pyspark.context.SparkContext', 'SparkContext', ([], {}), '()\n', (95569, 95571), False, 'from pyspark.context import SparkContext\n'), ((95864, 95892), 'threading.Thread', 'threading.Thread', ([], {'target': 'run'}), '(target=run)\n', (95880, 95892), False, 'import threading\n'), ((95997, 96010), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (96007, 96010), False, 'import time\n'), ((96516, 96529), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (96526, 96529), False, 'import time\n'), ((96820, 96834), 'pyspark.context.SparkContext', 'SparkContext', ([], {}), '()\n', (96832, 96834), False, 'from pyspark.context import SparkContext\n'), ((97179, 97196), 'random.shuffle', 'random.shuffle', (['l'], {}), '(l)\n', (97193, 97196), False, 'import random\n'), ((7994, 8005), 'pyspark.conf.SparkConf', 'SparkConf', ([], {}), '()\n', (8003, 8005), False, 'from pyspark.conf import SparkConf\n'), ((11598, 11616), 'pyspark.serializers.PickleSerializer', 'PickleSerializer', ([], {}), '()\n', (11614, 11616), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11654, 11673), 'pyspark.serializers.MarshalSerializer', 'MarshalSerializer', ([], {}), '()\n', (11671, 11673), False, 'from pyspark.serializers import read_int, 
BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11706, 11722), 'pyspark.serializers.NoOpSerializer', 'NoOpSerializer', ([], {}), '()\n', (11720, 11722), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11724, 11742), 'pyspark.serializers.UTF8Deserializer', 'UTF8Deserializer', ([], {}), '()\n', (11740, 11742), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11780, 11796), 'pyspark.serializers.NoOpSerializer', 'NoOpSerializer', ([], {}), '()\n', (11794, 11796), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11798, 11816), 'pyspark.serializers.UTF8Deserializer', 'UTF8Deserializer', ([], {}), '()\n', (11814, 11816), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11853, 11871), 'pyspark.serializers.PickleSerializer', 'PickleSerializer', ([], {}), '()\n', (11869, 11871), False, 'from 
pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((11913, 11931), 'pyspark.serializers.PickleSerializer', 'PickleSerializer', ([], {}), '()\n', (11929, 11931), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((22344, 22365), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (22358, 22365), False, 'import random\n'), ((34718, 34732), 'hashlib.md5', 'hashlib.md5', (['s'], {}), '(s)\n', (34729, 34732), False, 'import hashlib\n'), ((35137, 35151), 'hashlib.md5', 'hashlib.md5', (['s'], {}), '(s)\n', (35148, 35151), False, 'import hashlib\n'), ((37529, 37547), 'pyspark.serializers.PickleSerializer', 'PickleSerializer', ([], {}), '()\n', (37545, 37547), False, 'from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, FlattenedValuesSerializer\n'), ((53934, 53945), 'pyspark.conf.SparkConf', 'SparkConf', ([], {}), '()\n', (53943, 53945), False, 'from pyspark.conf import SparkConf\n'), ((54802, 54815), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (54812, 54815), False, 'import os\n'), ((60499, 60509), 'array.array', 'array', (['"""d"""'], {}), "('d')\n", (60504, 60509), False, 'from array import array\n'), ((60530, 60557), 'array.array', 'array', (['"""d"""', '[3.0, 4.0, 5.0]'], {}), "('d', [3.0, 4.0, 5.0])\n", (60535, 60557), False, 'from array import array\n'), ((60578, 60605), 
'array.array', 'array', (['"""d"""', '[4.0, 5.0, 6.0]'], {}), "('d', [4.0, 5.0, 6.0])\n", (60583, 60605), False, 'from array import array\n'), ((66114, 66145), 'os.path.join', 'os.path.join', (['path', '"""part-0000"""'], {}), "(path, 'part-0000')\n", (66126, 66145), False, 'import os\n'), ((66464, 66495), 'os.path.join', 'os.path.join', (['path', '"""part-0000"""'], {}), "(path, 'part-0000')\n", (66476, 66495), False, 'import os\n'), ((72517, 72527), 'array.array', 'array', (['"""d"""'], {}), "('d')\n", (72522, 72527), False, 'from array import array\n'), ((72556, 72583), 'array.array', 'array', (['"""d"""', '[1.0, 2.0, 3.0]'], {}), "('d', [1.0, 2.0, 3.0])\n", (72561, 72583), False, 'from array import array\n'), ((72612, 72639), 'array.array', 'array', (['"""d"""', '[3.0, 4.0, 5.0]'], {}), "('d', [3.0, 4.0, 5.0])\n", (72617, 72639), False, 'from array import array\n'), ((80559, 80587), 'os.kill', 'os.kill', (['daemon.pid', 'SIGTERM'], {}), '(daemon.pid, SIGTERM)\n', (80566, 80587), False, 'import os\n'), ((81688, 81710), 'os.kill', 'os.kill', (['worker_pid', '(0)'], {}), '(worker_pid, 0)\n', (81695, 81710), False, 'import os\n'), ((81727, 81742), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (81737, 81742), False, 'import time\n'), ((84593, 84621), 'os.environ.get', 'os.environ.get', (['"""SPARK_HOME"""'], {}), "('SPARK_HOME')\n", (84607, 84621), False, 'import os\n'), ((85369, 85403), 'os.path.join', 'os.path.join', (['self.programDir', 'dir'], {}), '(self.programDir, dir)\n', (85381, 85403), False, 'import os\n'), ((93684, 93722), 'pyspark.context.SparkContext', 'SparkContext', (['"""an-invalid-master-name"""'], {}), "('an-invalid-master-name')\n", (93696, 93722), False, 'from pyspark.context import SparkContext\n'), ((95047, 95061), 'pyspark.context.SparkContext', 'SparkContext', ([], {}), '()\n', (95059, 95061), False, 'from pyspark.context import SparkContext\n'), ((98959, 98996), 'pyspark.util._exception_message', '_exception_message', 
(['context.exception'], {}), '(context.exception)\n', (98977, 98996), False, 'from pyspark.util import _exception_message\n'), ((99134, 99173), 'pyspark.util.VersionUtils.majorMinorVersion', 'VersionUtils.majorMinorVersion', (['"""abced"""'], {}), "('abced')\n", (99164, 99173), False, 'from pyspark.util import VersionUtils\n'), ((99793, 99813), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (99801, 99813), True, 'import numpy as np\n'), ((99815, 99835), 'numpy.array', 'np.array', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (99823, 99835), True, 'import numpy as np\n'), ((99837, 99857), 'numpy.array', 'np.array', (['[3.0, 3.0]'], {}), '([3.0, 3.0])\n', (99845, 99857), True, 'import numpy as np\n'), ((101455, 101508), 'xmlrunner.XMLTestRunner', 'xmlrunner.XMLTestRunner', ([], {'output': '"""target/test-reports"""'}), "(output='target/test-reports')\n", (101478, 101508), False, 'import xmlrunner\n'), ((15561, 15572), 'userlib.UserClass', 'UserClass', ([], {}), '()\n', (15570, 15572), False, 'from userlib import UserClass\n'), ((17369, 17380), 'userlib.UserClass', 'UserClass', ([], {}), '()\n', (17378, 17380), False, 'from userlib import UserClass\n'), ((17903, 17914), 'userlib.UserClass', 'UserClass', ([], {}), '()\n', (17912, 17914), False, 'from userlib import UserClass\n'), ((24625, 24660), 'glob.glob', 'glob', (["(tempFile.name + '/part-0000*')"], {}), "(tempFile.name + '/part-0000*')\n", (24629, 24660), False, 'from glob import glob\n'), ((25088, 25123), 'glob.glob', 'glob', (["(tempFile.name + '/part-0000*')"], {}), "(tempFile.name + '/part-0000*')\n", (25092, 25123), False, 'from glob import glob\n'), ((93834, 93860), 'pyspark.context.SparkContext.getOrCreate', 'SparkContext.getOrCreate', ([], {}), '()\n', (93858, 93860), False, 'from pyspark.context import SparkContext\n'), ((95695, 95710), 'time.sleep', 'time.sleep', (['(100)'], {}), '(100)\n', (95705, 95710), False, 'import time\n'), ((33209, 33222), 'operator.itemgetter', 'itemgetter', 
(['(1)'], {}), '(1)\n', (33219, 33222), False, 'from operator import itemgetter\n'), ((33278, 33294), 'operator.itemgetter', 'itemgetter', (['(2)', '(3)'], {}), '(2, 3)\n', (33288, 33294), False, 'from operator import itemgetter\n'), ((55694, 55705), 'pyspark.conf.SparkConf', 'SparkConf', ([], {}), '()\n', (55703, 55705), False, 'from pyspark.conf import SparkConf\n'), ((80910, 80922), 'os.getppid', 'os.getppid', ([], {}), '()\n', (80920, 80922), False, 'import os\n'), ((80924, 80935), 'os.getpid', 'os.getpid', ([], {}), '()\n', (80933, 80935), False, 'import os\n'), ((97078, 97089), 'pyspark.conf.SparkConf', 'SparkConf', ([], {}), '()\n', (97087, 97089), False, 'from pyspark.conf import SparkConf\n'), ((19426, 19443), 'pyspark.taskcontext.TaskContext.get', 'TaskContext.get', ([], {}), '()\n', (19441, 19443), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((19504, 19521), 'pyspark.taskcontext.TaskContext.get', 'TaskContext.get', ([], {}), '()\n', (19519, 19521), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((19936, 19953), 'pyspark.taskcontext.TaskContext.get', 'TaskContext.get', ([], {}), '()\n', (19951, 19953), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((22903, 22927), 'pyspark.taskcontext.BarrierTaskContext.get', 'BarrierTaskContext.get', ([], {}), '()\n', (22925, 22927), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((18833, 18850), 'pyspark.taskcontext.TaskContext.get', 'TaskContext.get', ([], {}), '()\n', (18848, 18850), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((18908, 18925), 'pyspark.taskcontext.TaskContext.get', 'TaskContext.get', ([], {}), '()\n', (18923, 18925), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((19051, 19064), 'pyspark.taskcontext.TaskContext', 'TaskContext', ([], {}), '()\n', (19062, 19064), False, 'from pyspark.taskcontext import 
BarrierTaskContext, TaskContext\n'), ((21665, 21682), 'pyspark.taskcontext.TaskContext.get', 'TaskContext.get', ([], {}), '()\n', (21680, 21682), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((21800, 21817), 'pyspark.taskcontext.TaskContext.get', 'TaskContext.get', ([], {}), '()\n', (21815, 21817), False, 'from pyspark.taskcontext import BarrierTaskContext, TaskContext\n'), ((34878, 34899), 'hashlib.md5', 'hashlib.md5', (['b2.value'], {}), '(b2.value)\n', (34889, 34899), False, 'import hashlib\n'), ((35297, 35318), 'hashlib.md5', 'hashlib.md5', (['b2.value'], {}), '(b2.value)\n', (35308, 35318), False, 'import hashlib\n')] |
import torch
import torch.nn as nn
import collections
import numpy as np
import math
import pdb
# Class that handles all the messy hierarchical observation stuff
class HierarchyUtils(object):
def __init__(self, ll_obs_sz, hl_obs_sz, hl_action_space, theta_sz, add_count):
self.ll_obs_sz = ll_obs_sz
if add_count:
self.ll_raw_obs_sz = [self.ll_obs_sz[0] - theta_sz - 1]
else:
self.ll_raw_obs_sz = [self.ll_obs_sz[0] - theta_sz]
self.hl_obs_sz = hl_obs_sz
self.theta_sz = theta_sz
self.hl_action_space = hl_action_space
self.add_count = add_count
# Seperate out highlevel, lowlevel and counts
def seperate_obs(self, obs):
ll_raw_obs = obs[:, :self.ll_raw_obs_sz[0]]
assert(ll_raw_obs.shape[-1] == self.ll_raw_obs_sz[0])
hl_obs = obs[:, self.ll_raw_obs_sz[0]:-1]
assert(hl_obs.shape[-1] == self.hl_obs_sz[0])
count = obs[:, -1]
return hl_obs, ll_raw_obs, count
# Append theta and count to ll obs
def append_theta(self, ll_raw_obs, hl_action, counts):
# Get theta
if self.hl_action_space.__class__.__name__ == 'Discrete':
assert(self.theta_sz == self.hl_action_space.n)
thetas = np.zeros([len(hl_action), self.theta_sz])
for e, act in enumerate(hl_action):
thetas[e, act] = 1
else:
thetas = hl_action
# Concanetate
if self.add_count:
if len(counts.shape) != len(ll_raw_obs.shape):
counts = np.expand_dims(counts, axis=1)
ll_obs = np.concatenate([ll_raw_obs, thetas, counts], 1)
else:
ll_obs = np.concatenate([ll_raw_obs, thetas], 1)
assert(ll_obs.shape[-1] == self.ll_obs_sz[0])
return ll_obs
# Append placeholder theta and count to ll obs
def placeholder_theta(self, ll_raw_obs, counts):
thetas = float('inf') * np.ones([len(ll_raw_obs), self.theta_sz])
# Concanetate
if self.add_count:
if len(counts.shape) != len(ll_raw_obs.shape):
counts = np.expand_dims(counts, axis=1)
ll_obs = np.concatenate([ll_raw_obs, thetas, counts], 1)
else:
ll_obs = np.concatenate([ll_raw_obs, thetas], 1)
assert(ll_obs.shape[-1] == self.ll_obs_sz[0])
return ll_obs
# Update ll_obs to remove placeholders
def update_theta(self, ll_obs, hl_action):
# Take in single obs and high level action and update away the placehodler
assert(self.has_placeholder(ll_obs))
assert(ll_obs.shape == self.ll_obs_sz)
# Get theta
if self.hl_action_space.__class__.__name__ == 'Discrete':
assert(self.theta_sz == self.hl_action_space.n)
theta = torch.zeros(self.theta_sz)
theta[hl_action] = 1
else:
theta = torch.from_numpy(hl_action)
# Update observation with theta
if self.add_count:
ll_obs[self.ll_raw_obs_sz[0]:-1] = theta
else:
ll_obs[self.ll_raw_obs_sz[0]:] = theta
assert(not self.has_placeholder(ll_obs))
return ll_obs
# Check if ll_obs has a placeholder
def has_placeholder(self, ll_obs):
if float('inf') in ll_obs:
return True
else:
return False
| [
"torch.zeros",
"numpy.expand_dims",
"numpy.concatenate",
"torch.from_numpy"
] | [((1626, 1673), 'numpy.concatenate', 'np.concatenate', (['[ll_raw_obs, thetas, counts]', '(1)'], {}), '([ll_raw_obs, thetas, counts], 1)\n', (1640, 1673), True, 'import numpy as np\n'), ((1709, 1748), 'numpy.concatenate', 'np.concatenate', (['[ll_raw_obs, thetas]', '(1)'], {}), '([ll_raw_obs, thetas], 1)\n', (1723, 1748), True, 'import numpy as np\n'), ((2191, 2238), 'numpy.concatenate', 'np.concatenate', (['[ll_raw_obs, thetas, counts]', '(1)'], {}), '([ll_raw_obs, thetas, counts], 1)\n', (2205, 2238), True, 'import numpy as np\n'), ((2274, 2313), 'numpy.concatenate', 'np.concatenate', (['[ll_raw_obs, thetas]', '(1)'], {}), '([ll_raw_obs, thetas], 1)\n', (2288, 2313), True, 'import numpy as np\n'), ((2832, 2858), 'torch.zeros', 'torch.zeros', (['self.theta_sz'], {}), '(self.theta_sz)\n', (2843, 2858), False, 'import torch\n'), ((2926, 2953), 'torch.from_numpy', 'torch.from_numpy', (['hl_action'], {}), '(hl_action)\n', (2942, 2953), False, 'import torch\n'), ((1574, 1604), 'numpy.expand_dims', 'np.expand_dims', (['counts'], {'axis': '(1)'}), '(counts, axis=1)\n', (1588, 1604), True, 'import numpy as np\n'), ((2139, 2169), 'numpy.expand_dims', 'np.expand_dims', (['counts'], {'axis': '(1)'}), '(counts, axis=1)\n', (2153, 2169), True, 'import numpy as np\n')] |
"""
* @file compare_result.py
*
* Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
import numpy as np
def compare(golden, result):
return np.sum(abs(golden-result) > 0.01) < 0.01*result.size
if __name__ == '__main__':
aicore_result_h = np.fromfile("../../result_files/output_0.bin",
dtype="float16")
aicore_result_c = np.fromfile("../../result_files/output_1.bin",
dtype="float16")
numpy_result_h = np.fromfile("output_golden_0.bin", dtype="float16")
numpy_result_c = np.fromfile("output_golden_1.bin", dtype="float16")
if compare(numpy_result_h, aicore_result_h) and\
compare(numpy_result_c, aicore_result_c):
print("compare success")
else:
print("compare failed")
| [
"numpy.fromfile"
] | [((479, 542), 'numpy.fromfile', 'np.fromfile', (['"""../../result_files/output_0.bin"""'], {'dtype': '"""float16"""'}), "('../../result_files/output_0.bin', dtype='float16')\n", (490, 542), True, 'import numpy as np\n'), ((601, 664), 'numpy.fromfile', 'np.fromfile', (['"""../../result_files/output_1.bin"""'], {'dtype': '"""float16"""'}), "('../../result_files/output_1.bin', dtype='float16')\n", (612, 664), True, 'import numpy as np\n'), ((724, 775), 'numpy.fromfile', 'np.fromfile', (['"""output_golden_0.bin"""'], {'dtype': '"""float16"""'}), "('output_golden_0.bin', dtype='float16')\n", (735, 775), True, 'import numpy as np\n'), ((798, 849), 'numpy.fromfile', 'np.fromfile', (['"""output_golden_1.bin"""'], {'dtype': '"""float16"""'}), "('output_golden_1.bin', dtype='float16')\n", (809, 849), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from evaluations.racing_agent import Agent
class RacingAgent(Agent):
def __init__(self, checkpoint_path):
self.load(checkpoint_path)
def action(self, obs, state=None, **kwargs) -> np.ndarray:
observation = tf.constant(obs['lidar'], dtype=tf.float32)
action = self._policy(observation)
action = tf.squeeze(action)
return action.numpy(), None # second var is the state (state-less in this case)
def load(self, checkpoint_path):
self._policy = tf.saved_model.load(str(checkpoint_path))
if __name__ == '__main__':
agent = RacingAgent(checkpoint_path='policy')
observation = np.ones(shape=(1080,))
action = agent.action(observation)
print()
| [
"numpy.ones",
"tensorflow.constant",
"tensorflow.squeeze"
] | [((692, 714), 'numpy.ones', 'np.ones', ([], {'shape': '(1080,)'}), '(shape=(1080,))\n', (699, 714), True, 'import numpy as np\n'), ((277, 320), 'tensorflow.constant', 'tf.constant', (["obs['lidar']"], {'dtype': 'tf.float32'}), "(obs['lidar'], dtype=tf.float32)\n", (288, 320), True, 'import tensorflow as tf\n'), ((381, 399), 'tensorflow.squeeze', 'tf.squeeze', (['action'], {}), '(action)\n', (391, 399), True, 'import tensorflow as tf\n')] |
'''
@Time :
@Author : <NAME>
@File : demo_3d.py
@Brief :
'''
import argparse
import torch
from pathlib import Path
import numpy as np
from pcdet.config import cfg, cfg_from_yaml_file
from pcdet.datasets.plusai.plusai_bag_dataset import DemoDataset
from pcdet.models import build_network, load_data_to_gpu
from pcdet.utils import common_utils
from visual_utils.laserdetvis import LaserDetVis
def parse_config():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml',
help='specify the config for demo')
parser.add_argument('--data_path', type=str, default='demo_data',
help='specify the point cloud data file or directory')
parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model')
parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
return args, cfg
logger = common_utils.create_logger()
class VisualizeDets(LaserDetVis):
def __init__(self, model, dataset):
super(VisualizeDets, self).__init__(show_img=False)
self.model = model
self.dataset = dataset
self.data_idx = np.arange(len(self.dataset)).tolist()
np.random.shuffle(self.data_idx)
self.offset = 0
self.update()
def update(self):
idx = self.offset % len(self.dataset)
# idx = self.data_idx[idx]
with torch.no_grad():
data_dict = self.dataset.__getitem__(idx)
logger.info(f'Visualized sample index: \t{idx + 1}')
data_dict = self.dataset.collate_batch([data_dict])
load_data_to_gpu(data_dict)
pred_dicts, _ = self.model.forward(data_dict)
# img_path = os.path.join(self.root_path, example['image_path'])
# img = cv2.imread(img_path)
# Show
gt_objs = None
if self.dataset.split == 'val':
gt_objs = self.dataset.val_data_list[idx]['annos']['gt_boxes_lidar']
self.update_view(idx,
points=data_dict['points'][:, 1:].cpu().numpy(),
objs=pred_dicts[0]['pred_boxes'].cpu(),
ref_scores=pred_dicts[0]['pred_scores'].cpu().numpy(),
ref_labels=pred_dicts[0]['pred_labels'].cpu().numpy(),
gt_objs=gt_objs,
# img=img
)
# interface
def key_press(self, event):
self.canvas.events.key_press.block()
if self.show_img:
self.img_canvas.events.key_press.block()
if event.key == 'N':
self.offset += 1
self.update()
elif event.key == 'B':
self.offset -= 1
self.update()
elif event.key == 'C':
self.intensity_mode = not self.intensity_mode
elif event.key == 'Q' or event.key == 'Escape':
self.destroy()
def main():
args, cfg = parse_config()
logger.info('-----------------Quick Demo 3D of OpenPCDet-------------------------')
demo_dataset = DemoDataset(
dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False,
root_path=Path(args.data_path), ext=args.ext, logger=logger
)
logger.info(f'Total number of samples: \t{len(demo_dataset)}')
model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset)
model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True)
model.cuda()
model.eval()
# print instructions
print("To navigate:")
print("\tb: back (previous scan)")
print("\tn: next (next scan)")
print("\tq: quit (exit program)")
# run the visualizer
vis = VisualizeDets(model, demo_dataset)
vis.run()
logger.info('Demo done.')
if __name__ == '__main__':
main()
| [
"pcdet.models.load_data_to_gpu",
"argparse.ArgumentParser",
"pcdet.config.cfg_from_yaml_file",
"pcdet.utils.common_utils.create_logger",
"pathlib.Path",
"torch.no_grad",
"numpy.random.shuffle"
] | [((1121, 1149), 'pcdet.utils.common_utils.create_logger', 'common_utils.create_logger', ([], {}), '()\n', (1147, 1149), False, 'from pcdet.utils import common_utils\n'), ((450, 499), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""arg parser"""'}), "(description='arg parser')\n", (473, 499), False, 'import argparse\n'), ((1049, 1087), 'pcdet.config.cfg_from_yaml_file', 'cfg_from_yaml_file', (['args.cfg_file', 'cfg'], {}), '(args.cfg_file, cfg)\n', (1067, 1087), False, 'from pcdet.config import cfg, cfg_from_yaml_file\n'), ((1392, 1424), 'numpy.random.shuffle', 'np.random.shuffle', (['self.data_idx'], {}), '(self.data_idx)\n', (1409, 1424), True, 'import numpy as np\n'), ((1568, 1583), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1581, 1583), False, 'import torch\n'), ((1757, 1784), 'pcdet.models.load_data_to_gpu', 'load_data_to_gpu', (['data_dict'], {}), '(data_dict)\n', (1773, 1784), False, 'from pcdet.models import build_network, load_data_to_gpu\n'), ((3221, 3241), 'pathlib.Path', 'Path', (['args.data_path'], {}), '(args.data_path)\n', (3225, 3241), False, 'from pathlib import Path\n')] |
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from keras.layers.recurrent import LSTM
from keras.layers import Dense, Input, Embedding
from keras.preprocessing.sequence import pad_sequences
from collections import Counter
import nltk
import numpy as np
BATCH_SIZE = 64
NUM_EPOCHS = 100
HIDDEN_UNITS = 256
NUM_SAMPLES = 10000
MAX_VOCAB_SIZE = 10000
DATA_PATH = 'data/cmn.txt'
WEIGHT_FILE_PATH = 'models/eng-to-cmn/eng-to-cmn-word-weights.h5'
ARCHITECTURE_FILE_PATH = 'models/eng-to-cmn/eng-to-cmn-word-architecture.json'
input_counter = Counter()
target_counter = Counter()
lines = open(DATA_PATH, 'rt', encoding='utf8').read().split('\n')
for line in lines[: min(NUM_SAMPLES, len(lines)-1)]:
input_text, target_text = line.split('\t')
input_words = [w for w in nltk.word_tokenize(input_text.lower())]
target_text = '\t' + target_text + '\n'
for w in input_words:
input_counter[w] += 1
for char in target_text:
target_counter[char] += 1
input_word2idx = dict()
target_word2idx = dict()
for idx, word in enumerate(input_counter.most_common(MAX_VOCAB_SIZE)):
input_word2idx[word[0]] = idx + 2
for idx, word in enumerate(target_counter.most_common(MAX_VOCAB_SIZE)):
target_word2idx[word[0]] = idx
input_word2idx['PAD'] = 0
input_word2idx['UNK'] = 1
input_idx2word = dict([(idx, word) for word, idx in input_word2idx.items()])
target_idx2word = dict([(idx, word) for word, idx in target_word2idx.items()])
num_encoder_tokens = len(input_idx2word)
num_decoder_tokens = len(target_idx2word)
np.save('models/eng-to-cmn/eng-to-cmn-word-input-word2idx.npy', input_word2idx)
np.save('models/eng-to-cmn/eng-to-cmn-word-input-idx2word.npy', input_idx2word)
np.save('models/eng-to-cmn/eng-to-cmn-word-target-word2idx.npy', target_word2idx)
np.save('models/eng-to-cmn/eng-to-cmn-word-target-idx2word.npy', target_idx2word)
encoder_input_data = []
encoder_max_seq_length = 0
decoder_max_seq_length = 0
lines = open(DATA_PATH, 'rt', encoding='utf8').read().split('\n')
for line in lines[: min(NUM_SAMPLES, len(lines)-1)]:
input_text, target_text = line.split('\t')
target_text = '\t' + target_text + '\n'
input_words = [w for w in nltk.word_tokenize(input_text.lower())]
encoder_input_wids = []
for w in input_words:
w2idx = 1 # default [UNK]
if w in input_word2idx:
w2idx = input_word2idx[w]
encoder_input_wids.append(w2idx)
encoder_input_data.append(encoder_input_wids)
encoder_max_seq_length = max(len(encoder_input_wids), encoder_max_seq_length)
decoder_max_seq_length = max(len(target_text), decoder_max_seq_length)
encoder_input_data = pad_sequences(encoder_input_data, encoder_max_seq_length)
decoder_target_data = np.zeros(shape=(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens))
decoder_input_data = np.zeros(shape=(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens))
lines = open(DATA_PATH, 'rt', encoding='utf8').read().split('\n')
for lineIdx, line in enumerate(lines[: min(NUM_SAMPLES, len(lines)-1)]):
_, target_text = line.split('\t')
target_text = '\t' + target_text + '\n'
for idx, char in enumerate(target_text):
if char in target_word2idx:
w2idx = target_word2idx[char]
decoder_input_data[lineIdx, idx, w2idx] = 1
if idx > 0:
decoder_target_data[lineIdx, idx-1, w2idx] = 1
context = dict()
context['num_encoder_tokens'] = num_encoder_tokens
context['num_decoder_tokens'] = num_decoder_tokens
context['encoder_max_seq_length'] = encoder_max_seq_length
context['decoder_max_seq_length'] = decoder_max_seq_length
np.save('models/eng-to-cmn/eng-to-cmn-word-context.npy', context)
encoder_inputs = Input(shape=(None, ), name='encoder_inputs')
encoder_embedding = Embedding(input_dim=num_encoder_tokens, output_dim=HIDDEN_UNITS,
input_length=encoder_max_seq_length, name='encoder_embedding')
encoder_lstm = LSTM(units=HIDDEN_UNITS, return_state=True, name='encoder_lstm')
encoder_outputs, encoder_state_h, encoder_state_c = encoder_lstm(encoder_embedding(encoder_inputs))
encoder_states = [encoder_state_h, encoder_state_c]
decoder_inputs = Input(shape=(None, num_decoder_tokens), name='decoder_inputs')
decoder_lstm = LSTM(units=HIDDEN_UNITS, return_state=True, return_sequences=True, name='decoder_lstm')
decoder_outputs, decoder_state_h, decoder_state_c = decoder_lstm(decoder_inputs,
initial_state=encoder_states)
decoder_dense = Dense(units=num_decoder_tokens, activation='softmax', name='decoder_dense')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
json = model.to_json()
open(ARCHITECTURE_FILE_PATH, 'w').write(json)
checkpoint = ModelCheckpoint(filepath=WEIGHT_FILE_PATH, save_best_only=True)
model.fit([encoder_input_data, decoder_input_data], decoder_target_data, batch_size=BATCH_SIZE, epochs=NUM_EPOCHS,
verbose=1, validation_split=0.2, callbacks=[checkpoint])
model.save_weights(WEIGHT_FILE_PATH)
| [
"numpy.save",
"keras.preprocessing.sequence.pad_sequences",
"keras.callbacks.ModelCheckpoint",
"numpy.zeros",
"keras.models.Model",
"keras.layers.Dense",
"keras.layers.Embedding",
"keras.layers.recurrent.LSTM",
"keras.layers.Input",
"collections.Counter"
] | [((567, 576), 'collections.Counter', 'Counter', ([], {}), '()\n', (574, 576), False, 'from collections import Counter\n'), ((594, 603), 'collections.Counter', 'Counter', ([], {}), '()\n', (601, 603), False, 'from collections import Counter\n'), ((1565, 1644), 'numpy.save', 'np.save', (['"""models/eng-to-cmn/eng-to-cmn-word-input-word2idx.npy"""', 'input_word2idx'], {}), "('models/eng-to-cmn/eng-to-cmn-word-input-word2idx.npy', input_word2idx)\n", (1572, 1644), True, 'import numpy as np\n'), ((1645, 1724), 'numpy.save', 'np.save', (['"""models/eng-to-cmn/eng-to-cmn-word-input-idx2word.npy"""', 'input_idx2word'], {}), "('models/eng-to-cmn/eng-to-cmn-word-input-idx2word.npy', input_idx2word)\n", (1652, 1724), True, 'import numpy as np\n'), ((1725, 1810), 'numpy.save', 'np.save', (['"""models/eng-to-cmn/eng-to-cmn-word-target-word2idx.npy"""', 'target_word2idx'], {}), "('models/eng-to-cmn/eng-to-cmn-word-target-word2idx.npy',\n target_word2idx)\n", (1732, 1810), True, 'import numpy as np\n'), ((1807, 1892), 'numpy.save', 'np.save', (['"""models/eng-to-cmn/eng-to-cmn-word-target-idx2word.npy"""', 'target_idx2word'], {}), "('models/eng-to-cmn/eng-to-cmn-word-target-idx2word.npy',\n target_idx2word)\n", (1814, 1892), True, 'import numpy as np\n'), ((2680, 2737), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['encoder_input_data', 'encoder_max_seq_length'], {}), '(encoder_input_data, encoder_max_seq_length)\n', (2693, 2737), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2761, 2834), 'numpy.zeros', 'np.zeros', ([], {'shape': '(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens)'}), '(shape=(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens))\n', (2769, 2834), True, 'import numpy as np\n'), ((2856, 2929), 'numpy.zeros', 'np.zeros', ([], {'shape': '(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens)'}), '(shape=(NUM_SAMPLES, decoder_max_seq_length, num_decoder_tokens))\n', (2864, 2929), True, 'import numpy as 
np\n'), ((3656, 3721), 'numpy.save', 'np.save', (['"""models/eng-to-cmn/eng-to-cmn-word-context.npy"""', 'context'], {}), "('models/eng-to-cmn/eng-to-cmn-word-context.npy', context)\n", (3663, 3721), True, 'import numpy as np\n'), ((3740, 3783), 'keras.layers.Input', 'Input', ([], {'shape': '(None,)', 'name': '"""encoder_inputs"""'}), "(shape=(None,), name='encoder_inputs')\n", (3745, 3783), False, 'from keras.layers import Dense, Input, Embedding\n'), ((3805, 3936), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'num_encoder_tokens', 'output_dim': 'HIDDEN_UNITS', 'input_length': 'encoder_max_seq_length', 'name': '"""encoder_embedding"""'}), "(input_dim=num_encoder_tokens, output_dim=HIDDEN_UNITS,\n input_length=encoder_max_seq_length, name='encoder_embedding')\n", (3814, 3936), False, 'from keras.layers import Dense, Input, Embedding\n'), ((3978, 4042), 'keras.layers.recurrent.LSTM', 'LSTM', ([], {'units': 'HIDDEN_UNITS', 'return_state': '(True)', 'name': '"""encoder_lstm"""'}), "(units=HIDDEN_UNITS, return_state=True, name='encoder_lstm')\n", (3982, 4042), False, 'from keras.layers.recurrent import LSTM\n'), ((4213, 4275), 'keras.layers.Input', 'Input', ([], {'shape': '(None, num_decoder_tokens)', 'name': '"""decoder_inputs"""'}), "(shape=(None, num_decoder_tokens), name='decoder_inputs')\n", (4218, 4275), False, 'from keras.layers import Dense, Input, Embedding\n'), ((4291, 4383), 'keras.layers.recurrent.LSTM', 'LSTM', ([], {'units': 'HIDDEN_UNITS', 'return_state': '(True)', 'return_sequences': '(True)', 'name': '"""decoder_lstm"""'}), "(units=HIDDEN_UNITS, return_state=True, return_sequences=True, name=\n 'decoder_lstm')\n", (4295, 4383), False, 'from keras.layers.recurrent import LSTM\n'), ((4571, 4646), 'keras.layers.Dense', 'Dense', ([], {'units': 'num_decoder_tokens', 'activation': '"""softmax"""', 'name': '"""decoder_dense"""'}), "(units=num_decoder_tokens, activation='softmax', name='decoder_dense')\n", (4576, 4646), False, 'from keras.layers 
import Dense, Input, Embedding\n'), ((4705, 4761), 'keras.models.Model', 'Model', (['[encoder_inputs, decoder_inputs]', 'decoder_outputs'], {}), '([encoder_inputs, decoder_inputs], decoder_outputs)\n', (4710, 4761), False, 'from keras.models import Model\n'), ((4916, 4979), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'WEIGHT_FILE_PATH', 'save_best_only': '(True)'}), '(filepath=WEIGHT_FILE_PATH, save_best_only=True)\n', (4931, 4979), False, 'from keras.callbacks import ModelCheckpoint\n')] |
import matplotlib.pyplot as plt
import numpy as np
def grafico (tempo_inicial, tempo_final, delta_t, frequencia, Valor_medio):
# Data for plotting
t = np.arange(tempo_inicial, tempo_final, delta_t)
s = Valor_medio + np.sin(frequencia * np.pi * t)
#s = t*t - 3*t + 1
fig, ax = plt.subplots()
ax.plot(t, s)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',
title='About as simple as it gets, folks')
ax.grid()
fig.savefig("test.png")
plt.show()
grafico(0.0, 2.0, 0.01, 16.0, 0.0)
grafico(0.0, 2.0, 0.01, 8.0, 0.0)
grafico(0.0, 2.0, 0.01, 4.0, 0.0)
grafico(0.0, 2.0, 0.01, 2.0, 0.0) | [
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((168, 214), 'numpy.arange', 'np.arange', (['tempo_inicial', 'tempo_final', 'delta_t'], {}), '(tempo_inicial, tempo_final, delta_t)\n', (177, 214), True, 'import numpy as np\n'), ((316, 330), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (328, 330), True, 'import matplotlib.pyplot as plt\n'), ((515, 525), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (523, 525), True, 'import matplotlib.pyplot as plt\n'), ((240, 270), 'numpy.sin', 'np.sin', (['(frequencia * np.pi * t)'], {}), '(frequencia * np.pi * t)\n', (246, 270), True, 'import numpy as np\n')] |
import argparse
import os
import time
import timeit
import numpy as np
from rsgd.algo.recur_mech import sgd_recur
from rsgd.common.dat import load_dat
from rsgd.common.logistic import logit_loss
from rsgd.common.logistic import logistic_grad
from rsgd.common.logistic import logistic_test
from rsgd.common.svm import hsvm_loss
from rsgd.common.svm import hsvm_grad
from rsgd.common.svm import svm_test
from sklearn.model_selection import RepeatedKFold
def epsilon_worst_case(sens, alpha, sigma_sq, delta):
sens_sq = (sens[-1])**2
rdp_eps = alpha*sens_sq/sigma_sq[:, np.newaxis]
dp_eps = rdp_eps + np.log(1.0/delta) / (alpha - 1.0)
return np.min(dp_eps, axis=1)
def main(args):
# load the dataset
dname = f"{args.dname}.dat"
fpath = os.path.join(args.data_dir, dname)
X, y = load_dat(fpath, normalize=args.norm, shuffle=args.shuffle)
N, dim = X.shape
# order of Renyi divergence
alpha = np.linspace(1.5, 1024, 1000)
delta = args.delta
sigma = np.array([0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8])
sigma = np.flip(sigma, 0)
batch_size = args.batch_size
n_sigma = len(sigma)
cv_rep = 10
K = 5
n_rep = K * cv_rep
eps = np.zeros((n_sigma, n_rep))
acc = np.zeros_like(eps)
obj = np.zeros_like(eps)
j = 0
# task
if args.svm:
loss_func = hsvm_loss
grad_func = hsvm_grad
test_func = svm_test
y[y < 0.5] = -1.0
task = 'svm'
else:
loss_func = logit_loss
grad_func = logistic_grad
test_func = logistic_test
task = 'logres'
rkf = RepeatedKFold(n_splits=K, n_repeats=cv_rep)
for train_idx, test_idx in rkf.split(X):
train_X, train_y = X[train_idx, :], y[train_idx]
test_X, test_y = X[test_idx, :], y[test_idx]
noise = np.random.randn(dim)
# new recurrence relation
w, sens = sgd_recur(train_X, train_y, grad_func,
batch_size, args.T, args.L,
reg_coeff=args.mu, R=args.norm,
init_step=args.init_step,
verbose=False)
sigma_sq = 2.0 * np.square(sigma)
eps[:, j] = epsilon_worst_case(sens[-1, :], alpha, sigma_sq, delta)
noisy_w = w[-1, :] + sigma[:, np.newaxis] * noise
acc[:, j] = test_func(noisy_w, test_X, test_y)*100
obj[:, j] = loss_func(noisy_w, train_X, train_y, reg_coeff=args.mu)
j += 1
avg_acc = np.mean(acc, axis=1)
avg_eps = np.mean(eps, axis=1)
avg_obj = np.mean(obj, axis=1)
str_mu = "{0}".format(args.mu)[2:]
str_is = "{0}".format(args.init_step).replace('.', '').rstrip('0')
filename = "rsgdd_{5}_T{0}B{1}mu{2}IS{3}_{4}".format(
args.T, args.batch_size, str_mu, str_is, args.dname, task)
rs_dir = "./plot/results"
np.savetxt("{0}/{1}_eps.out".format(rs_dir, filename), avg_eps, fmt='%.5f')
np.savetxt("{0}/{1}_acc.out".format(rs_dir, filename), avg_acc, fmt='%.5f')
np.savetxt("{0}/{1}_obj.out".format(rs_dir, filename), avg_obj, fmt='%.5f')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='recursive mechanism')
parser.add_argument('dname', help='dataset name')
parser.add_argument('T', type=int, help='total number of iterations')
parser.add_argument('--data_dir', type=str, default=None)
parser.add_argument('--batch_size', type=int, default=4000)
parser.add_argument('--init_step', type=float, default=0.5)
parser.add_argument('--mu', type=float, default=0.001)
parser.add_argument('--L', type=float, default=1.81)
parser.add_argument('--delta', type=float, default=1e-8)
parser.add_argument('--norm', type=float, default=1.0)
parser.add_argument('--svm', action='store_true')
parser.add_argument('--shuffle', action='store_true')
args = parser.parse_args()
print("Running the program ... [{0}]".format(
time.strftime("%m/%d/%Y %H:%M:%S")))
print("Parameters")
print("----------")
for arg in vars(args):
print(" - {0:22s}: {1}".format(arg, getattr(args, arg)))
start_time = timeit.default_timer()
main(args)
elapsed = timeit.default_timer() - start_time
mins, sec = divmod(elapsed, 60)
hrs, mins = divmod(mins, 60)
print("The program finished. [{0}]".format(
time.strftime("%m/%d/%Y %H:%M:%S")))
print("Elasepd time: %d:%02d:%02d" % (hrs, mins, sec))
| [
"numpy.zeros_like",
"numpy.flip",
"argparse.ArgumentParser",
"numpy.log",
"numpy.random.randn",
"timeit.default_timer",
"numpy.square",
"numpy.zeros",
"time.strftime",
"rsgd.common.dat.load_dat",
"rsgd.algo.recur_mech.sgd_recur",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.linspace"... | [((657, 679), 'numpy.min', 'np.min', (['dp_eps'], {'axis': '(1)'}), '(dp_eps, axis=1)\n', (663, 679), True, 'import numpy as np\n'), ((765, 799), 'os.path.join', 'os.path.join', (['args.data_dir', 'dname'], {}), '(args.data_dir, dname)\n', (777, 799), False, 'import os\n'), ((811, 869), 'rsgd.common.dat.load_dat', 'load_dat', (['fpath'], {'normalize': 'args.norm', 'shuffle': 'args.shuffle'}), '(fpath, normalize=args.norm, shuffle=args.shuffle)\n', (819, 869), False, 'from rsgd.common.dat import load_dat\n'), ((936, 964), 'numpy.linspace', 'np.linspace', (['(1.5)', '(1024)', '(1000)'], {}), '(1.5, 1024, 1000)\n', (947, 964), True, 'import numpy as np\n'), ((1000, 1066), 'numpy.array', 'np.array', (['[0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8]'], {}), '([0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8])\n', (1008, 1066), True, 'import numpy as np\n'), ((1079, 1096), 'numpy.flip', 'np.flip', (['sigma', '(0)'], {}), '(sigma, 0)\n', (1086, 1096), True, 'import numpy as np\n'), ((1216, 1242), 'numpy.zeros', 'np.zeros', (['(n_sigma, n_rep)'], {}), '((n_sigma, n_rep))\n', (1224, 1242), True, 'import numpy as np\n'), ((1253, 1271), 'numpy.zeros_like', 'np.zeros_like', (['eps'], {}), '(eps)\n', (1266, 1271), True, 'import numpy as np\n'), ((1282, 1300), 'numpy.zeros_like', 'np.zeros_like', (['eps'], {}), '(eps)\n', (1295, 1300), True, 'import numpy as np\n'), ((1620, 1663), 'sklearn.model_selection.RepeatedKFold', 'RepeatedKFold', ([], {'n_splits': 'K', 'n_repeats': 'cv_rep'}), '(n_splits=K, n_repeats=cv_rep)\n', (1633, 1663), False, 'from sklearn.model_selection import RepeatedKFold\n'), ((2506, 2526), 'numpy.mean', 'np.mean', (['acc'], {'axis': '(1)'}), '(acc, axis=1)\n', (2513, 2526), True, 'import numpy as np\n'), ((2541, 2561), 'numpy.mean', 'np.mean', (['eps'], {'axis': '(1)'}), '(eps, axis=1)\n', (2548, 2561), True, 'import numpy as np\n'), ((2576, 2596), 'numpy.mean', 'np.mean', (['obj'], {'axis': '(1)'}), '(obj, axis=1)\n', 
(2583, 2596), True, 'import numpy as np\n'), ((3145, 3203), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""recursive mechanism"""'}), "(description='recursive mechanism')\n", (3168, 3203), False, 'import argparse\n'), ((4156, 4178), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4176, 4178), False, 'import timeit\n'), ((1837, 1857), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (1852, 1857), True, 'import numpy as np\n'), ((1911, 2054), 'rsgd.algo.recur_mech.sgd_recur', 'sgd_recur', (['train_X', 'train_y', 'grad_func', 'batch_size', 'args.T', 'args.L'], {'reg_coeff': 'args.mu', 'R': 'args.norm', 'init_step': 'args.init_step', 'verbose': '(False)'}), '(train_X, train_y, grad_func, batch_size, args.T, args.L,\n reg_coeff=args.mu, R=args.norm, init_step=args.init_step, verbose=False)\n', (1920, 2054), False, 'from rsgd.algo.recur_mech import sgd_recur\n'), ((4210, 4232), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4230, 4232), False, 'import timeit\n'), ((611, 630), 'numpy.log', 'np.log', (['(1.0 / delta)'], {}), '(1.0 / delta)\n', (617, 630), True, 'import numpy as np\n'), ((2189, 2205), 'numpy.square', 'np.square', (['sigma'], {}), '(sigma)\n', (2198, 2205), True, 'import numpy as np\n'), ((3960, 3994), 'time.strftime', 'time.strftime', (['"""%m/%d/%Y %H:%M:%S"""'], {}), "('%m/%d/%Y %H:%M:%S')\n", (3973, 3994), False, 'import time\n'), ((4372, 4406), 'time.strftime', 'time.strftime', (['"""%m/%d/%Y %H:%M:%S"""'], {}), "('%m/%d/%Y %H:%M:%S')\n", (4385, 4406), False, 'import time\n')] |
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import cv2
import numpy as np
import utils
from numpy import matrix as mat
def generate_3d_points(spread=0.06 , npoints=3):
ax=np.linspace(-spread,spread,npoints)
xx,yy=np.meshgrid(ax,ax)
return np.vstack((xx.flatten(),yy.flatten(),np.zeros(len(ax)**2))).T
def manuver1():
#x,y,z,rx,ry,rz
vec=np.zeros(6)
vec[2]=3
#vec[3]=-90/180.0*np.pi
for _ in range(100):
vec[2]+=0.1
yield vec
for _ in range(50):
vec[3]+=1.0/180*np.pi
yield vec
def manuver2():
#x,y,z,rx,ry,rz
vec=np.zeros(6)
vec[2]=0.3
#vec[3]=-90/180.0*np.pi
for _ in range(50):
vec[2]+=0.01
yield vec
for rot_pitch,rot_roll in [(0,0),(200,20)]:
sz=0.004
for ind in range(400):
if ind==0:
step_x=-sz
step_y=0
if ind==100:
step_x=0
step_y=sz
if ind==200:
step_x=sz
step_y=0
if ind==300:
step_x=0
step_y=-sz
vec[0]-=step_x
vec[1]+=step_y
if rot_pitch>0:
pitch_sign=1 if (ind%rot_pitch)<(rot_pitch//2) else -1
vec[3]+=pitch_sign*0.3/180.0*np.pi
vec[5]+=pitch_sign*0.1/180.0*np.pi
vec[2]+=pitch_sign*0.001
if rot_roll>0:
roll_sign=1 if (ind%rot_roll)<(rot_roll//2) else -1
vec[4]+=roll_sign*0.2/180.0*np.pi
yield vec
def manuver3():
#x,y,z,rx,ry,rz
vec=np.zeros(6)
vec[2]=3
#vec[3]=-90/180.0*np.pi
for _ in range(50):
vec[2]+=0.1
yield vec
for rot_pitch,rot_roll in [(0,0)]:
for ind in range(400):
if ind==0:
step_x=-0.01
step_y=0
if ind==100:
step_x=0
step_y=0.01
if ind==200:
step_x=0.01
step_y=0
if ind==300:
step_x=0
step_y=-0.01
vec[0]-=step_x
vec[1]+=step_y
yield vec
for rot_pitch,rot_roll in [(0,30)]:
for ind in range(400):
if rot_pitch>0:
pitch_sign=1 if (ind%rot_pitch)<(rot_pitch//2) else -1
vec[3]+=pitch_sign*0.1/180.0*np.pi
if rot_roll>0:
roll_sign=1 if (ind%rot_roll)<(rot_roll//2) else -1
vec[4]+=roll_sign*0.1/180.0*np.pi
yield vec
class Capture(object):
def __init__(self,camera_matrix,size,noise_model=None, spread=0.06 , npoints=3):
self.K=mat(camera_matrix).reshape(3,3)
self.size=size
self.manuever=manuver2()
self.last_points=None
self.last_position=None
self.spread=spread
self.npoints=npoints
def read(self):
try:
self.last_position=self.manuever.__next__()
except StopIteration:
return False,None
x,y,z,rx,ry,rz=self.last_position
img=np.zeros((self.size[0],self.size[1],3),dtype='uint8')
C=np.array([x,y,z])
#0=RC+T T=-R.T*C
R=utils.eulerAnglesToRotationMatrix([rx,ry,rz])
T=-mat(R)*mat(C).T
if 0:
pts=(self.K*((mat(R)*generate_3d_points().T).T+T.T).T).T
pts/=pts[:,2]
pts=pts[:,:2].T
else:
R_vec,_=cv2.Rodrigues(R)
pts,jac=cv2.projectPoints(generate_3d_points(self.spread , self.npoints),R_vec,T.A1,self.K,np.zeros(5))
pts=pts.reshape(-1,2)
for ptm in pts:
#pt=list(map(int,ptm.A1))
pt=list(map(int,ptm))
cv2.rectangle(img,(pt[0]-1,pt[1]-1),(pt[0]+1,pt[1]+1),(0,255,0),1)
self.last_points=pts[:,:2]
return True,img
def track(self,*args,**kargs):
return self.last_points
if __name__=='__main__':
cap=Capture([160.0,0,160, 0,160.0,120.0,0,0,1],(240,320))
while 1:
ret,im=cap.read()
if ret:
cv2.imshow('img',im)
k=cv2.waitKey(0)%256
if k==27:
break
else:
import pdb;pdb.set_trace()
| [
"numpy.matrix",
"numpy.meshgrid",
"cv2.waitKey",
"utils.eulerAnglesToRotationMatrix",
"numpy.zeros",
"cv2.Rodrigues",
"numpy.array",
"pdb.set_trace",
"numpy.linspace",
"cv2.rectangle",
"cv2.imshow"
] | [((187, 224), 'numpy.linspace', 'np.linspace', (['(-spread)', 'spread', 'npoints'], {}), '(-spread, spread, npoints)\n', (198, 224), True, 'import numpy as np\n'), ((233, 252), 'numpy.meshgrid', 'np.meshgrid', (['ax', 'ax'], {}), '(ax, ax)\n', (244, 252), True, 'import numpy as np\n'), ((370, 381), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (378, 381), True, 'import numpy as np\n'), ((604, 615), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (612, 615), True, 'import numpy as np\n'), ((1645, 1656), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (1653, 1656), True, 'import numpy as np\n'), ((3161, 3217), 'numpy.zeros', 'np.zeros', (['(self.size[0], self.size[1], 3)'], {'dtype': '"""uint8"""'}), "((self.size[0], self.size[1], 3), dtype='uint8')\n", (3169, 3217), True, 'import numpy as np\n'), ((3225, 3244), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (3233, 3244), True, 'import numpy as np\n'), ((3287, 3334), 'utils.eulerAnglesToRotationMatrix', 'utils.eulerAnglesToRotationMatrix', (['[rx, ry, rz]'], {}), '([rx, ry, rz])\n', (3320, 3334), False, 'import utils\n'), ((3541, 3557), 'cv2.Rodrigues', 'cv2.Rodrigues', (['R'], {}), '(R)\n', (3554, 3557), False, 'import cv2\n'), ((3819, 3905), 'cv2.rectangle', 'cv2.rectangle', (['img', '(pt[0] - 1, pt[1] - 1)', '(pt[0] + 1, pt[1] + 1)', '(0, 255, 0)', '(1)'], {}), '(img, (pt[0] - 1, pt[1] - 1), (pt[0] + 1, pt[1] + 1), (0, 255,\n 0), 1)\n', (3832, 3905), False, 'import cv2\n'), ((4178, 4199), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'im'], {}), "('img', im)\n", (4188, 4199), False, 'import cv2\n'), ((4313, 4328), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4326, 4328), False, 'import pdb\n'), ((2751, 2769), 'numpy.matrix', 'mat', (['camera_matrix'], {}), '(camera_matrix)\n', (2754, 2769), True, 'from numpy import matrix as mat\n'), ((3354, 3360), 'numpy.matrix', 'mat', (['R'], {}), '(R)\n', (3357, 3360), True, 'from numpy import matrix as mat\n'), ((3361, 3367), 
'numpy.matrix', 'mat', (['C'], {}), '(C)\n', (3364, 3367), True, 'from numpy import matrix as mat\n'), ((3661, 3672), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (3669, 3672), True, 'import numpy as np\n'), ((4213, 4227), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4224, 4227), False, 'import cv2\n'), ((3410, 3416), 'numpy.matrix', 'mat', (['R'], {}), '(R)\n', (3413, 3416), True, 'from numpy import matrix as mat\n')] |
from autoarray.structures.arrays.two_d import array_2d
from autoarray.structures.grids.two_d import grid_2d_pixelization
from autoarray.inversion import mapper_util
from autoarray.structures.grids.two_d import grid_2d_util
from autoarray.structures.arrays.two_d import array_2d_util
import itertools
import numpy as np
def mapper(
source_grid_slim,
source_pixelization_grid,
data_pixelization_grid=None,
hyper_data=None,
):
if isinstance(source_pixelization_grid, grid_2d_pixelization.Grid2DRectangular):
return MapperRectangular(
source_grid_slim=source_grid_slim,
source_pixelization_grid=source_pixelization_grid,
data_pixelization_grid=data_pixelization_grid,
hyper_image=hyper_data,
)
elif isinstance(source_pixelization_grid, grid_2d_pixelization.Grid2DVoronoi):
return MapperVoronoi(
source_grid_slim=source_grid_slim,
source_pixelization_grid=source_pixelization_grid,
data_pixelization_grid=data_pixelization_grid,
hyper_image=hyper_data,
)
class Mapper:
def __init__(
self,
source_grid_slim,
source_pixelization_grid,
data_pixelization_grid=None,
hyper_image=None,
):
"""
Abstract base class representing a mapper, which maps unmasked pixels on a masked 2D array (in the form of \
a grid, see the *hyper_galaxies.array.grid* module) to discretized pixels in a pixelization.
1D structures are used to represent these mappings, for example between the different grid in a grid \
(e.g. the / sub grid). This follows the syntax grid_to_grid, whereby the index of a value on one grid \
equals that of another grid, for example:
- data_to_pix[2] = 1 tells us that the 3rd pixel on a grid maps to the 2nd pixel of a pixelization.
- sub_to_pix4] = 2 tells us that the 5th sub-pixel of a sub-grid maps to the 3rd pixel of a pixelization.
- pix_to_data[2] = 5 tells us that the 3rd pixel of a pixelization maps to the 6th (unmasked) pixel of a \
grid.
Mapping Matrix:
The mapper allows us to create a mapping matrix, which is a matrix representing the mapping between every
unmasked pixel of a grid and the pixels of a pixelization. Non-zero entries signify a mapping, whereas zeros
signify no mapping.
For example, if the grid has 5 pixels and the pixelization 3 pixels, with the following mappings:
pixel 0 -> pixelization pixel 0
pixel 1 -> pixelization pixel 0
pixel 2 -> pixelization pixel 1
pixel 3 -> pixelization pixel 1
pixel 4 -> pixelization pixel 2
The mapping matrix (which is of dimensions regular_pixels x pixelization_pixels) would appear as follows:
[1, 0, 0] [0->0]
[1, 0, 0] [1->0]
[0, 1, 0] [2->1]
[0, 1, 0] [3->1]
[0, 0, 1] [4->2]
The mapping matrix is in fact built using the sub-grid of the grid, whereby each pixel is \
divided into a grid of sub-pixels which are all paired to pixels in the pixelization. The entires \
in the mapping matrix now become fractional values dependent on the sub-grid size. For example, for a 2x2 \
sub-grid in each pixel (which means the fraction value is 1.0/(2.0^2) = 0.25, if we have the following mappings:
pixel 0 -> sub pixel 0 -> pixelization pixel 0
pixel 0 -> sub pixel 1 -> pixelization pixel 1
pixel 0 -> sub pixel 2 -> pixelization pixel 1
pixel 0 -> sub pixel 3 -> pixelization pixel 1
pixel 1 -> sub pixel 0 -> pixelization pixel 1
pixel 1 -> sub pixel 1 -> pixelization pixel 1
pixel 1 -> sub pixel 2 -> pixelization pixel 1
pixel 1 -> sub pixel 3 -> pixelization pixel 1
pixel 2 -> sub pixel 0 -> pixelization pixel 2
pixel 2 -> sub pixel 1 -> pixelization pixel 2
pixel 2 -> sub pixel 2 -> pixelization pixel 3
pixel 2 -> sub pixel 3 -> pixelization pixel 3
The mapping matrix (which is still of dimensions regular_pixels x source_pixels) would appear as follows:
[0.25, 0.75, 0.0, 0.0] [1 sub-pixel maps to pixel 0, 3 map to pixel 1]
[ 0.0, 1.0, 0.0, 0.0] [All sub-pixels map to pixel 1]
[ 0.0, 0.0, 0.5, 0.5] [2 sub-pixels map to pixel 2, 2 map to pixel 3]
Parameters
----------
pixels : int
The number of pixels in the mapper's pixelization.
source_grid_slim: gridStack
A stack of grid's which are mapped to the pixelization (includes an and sub grid).
hyper_image : np.ndarray
A pre-computed hyper-image of the image the mapper is expected to reconstruct, used for adaptive analysis.
"""
self.source_grid_slim = source_grid_slim
self.source_pixelization_grid = source_pixelization_grid
self.data_pixelization_grid = data_pixelization_grid
self.mapping_matrix = mapper_util.mapping_matrix_from(
pixelization_index_for_sub_slim_index=self.pixelization_index_for_sub_slim_index,
pixels=self.pixels,
total_mask_pixels=self.source_grid_slim.mask.pixels_in_mask,
slim_index_for_sub_slim_index=self._slim_index_for_sub_slim_index,
sub_fraction=self.source_grid_slim.mask.sub_fraction,
)
self.hyper_image = hyper_image
@property
def pixels(self):
return self.source_pixelization_grid.pixels
@property
def _slim_index_for_sub_slim_index(self):
return self.source_grid_slim.mask._slim_index_for_sub_slim_index
@property
def pixelization_index_for_sub_slim_index(self):
raise NotImplementedError(
"pixelization_index_for_sub_slim_index should be overridden"
)
@property
def all_sub_slim_indexes_for_pixelization_index(self):
"""
Returns the mappings between a pixelization's pixels and the unmasked sub-grid pixels. These mappings \
are determined after the grid is used to determine the pixelization.
The pixelization's pixels map to different number of sub-grid pixels, thus a list of lists is used to \
represent these mappings"""
all_sub_slim_indexes_for_pixelization_index = [[] for _ in range(self.pixels)]
for slim_index, pix_index in enumerate(
self.pixelization_index_for_sub_slim_index
):
all_sub_slim_indexes_for_pixelization_index[pix_index].append(slim_index)
return all_sub_slim_indexes_for_pixelization_index
def pixel_signals_from_signal_scale(self, signal_scale):
return mapper_util.adaptive_pixel_signals_from(
pixels=self.pixels,
signal_scale=signal_scale,
pixelization_index_for_sub_slim_index=self.pixelization_index_for_sub_slim_index,
slim_index_for_sub_slim_index=self.source_grid_slim.mask._slim_index_for_sub_slim_index,
hyper_image=self.hyper_image,
)
def slim_indexes_from_pixelization_indexes(self, pixelization_indexes):
image_for_source = self.all_sub_slim_indexes_for_pixelization_index
if not any(isinstance(i, list) for i in pixelization_indexes):
return list(
itertools.chain.from_iterable(
[image_for_source[index] for index in pixelization_indexes]
)
)
else:
indexes = []
for source_pixel_index_list in pixelization_indexes:
indexes.append(
list(
itertools.chain.from_iterable(
[
image_for_source[index]
for index in source_pixel_index_list
]
)
)
)
return indexes
def reconstruction_from(self, solution_vector):
"""Given the solution vector of an inversion (see *inversions.Inversion*), determine the reconstructed \
pixelization of the rectangular pixelization by using the mapper."""
raise NotImplementedError()
class MapperRectangular(Mapper):
def __init__(
self,
source_grid_slim,
source_pixelization_grid,
data_pixelization_grid=None,
hyper_image=None,
):
""" Class representing a rectangular mapper, which maps unmasked pixels on a masked 2D array (in the form of \
a grid, see the *hyper_galaxies.array.grid* module) to pixels discretized on a rectangular grid.
The and uniform geometry of the rectangular grid is used to perform efficient pixel pairings.
Parameters
----------
pixels : int
The number of pixels in the rectangular pixelization (y_pixels*x_pixels).
source_grid_slim : gridStack
A stack of grid describing the observed image's pixel coordinates (e.g. an image-grid, sub-grid, etc.).
shape_native : (int, int)
The dimensions of the rectangular grid of pixels (y_pixels, x_pixel)
geometry : pixelization.Rectangular.Geometry
The geometry (e.g. y / x edge locations, pixel-scales) of the rectangular pixelization.
"""
super(MapperRectangular, self).__init__(
source_grid_slim=source_grid_slim,
source_pixelization_grid=source_pixelization_grid,
data_pixelization_grid=data_pixelization_grid,
hyper_image=hyper_image,
)
@property
def shape_native(self):
return self.source_pixelization_grid.shape_native
@property
def pixelization_index_for_sub_slim_index(self):
"""The 1D index mappings between the sub grid's pixels and rectangular pixelization's pixels"""
return grid_2d_util.grid_pixel_indexes_2d_slim_from(
grid_scaled_2d_slim=self.source_grid_slim,
shape_native=self.source_pixelization_grid.shape_native,
pixel_scales=self.source_pixelization_grid.pixel_scales,
origin=self.source_pixelization_grid.origin,
).astype("int")
def reconstruction_from(self, solution_vector):
"""Given the solution vector of an inversion (see *inversions.Inversion*), determine the reconstructed \
pixelization of the rectangular pixelization by using the mapper."""
recon = array_2d_util.array_2d_native_from(
array_2d_slim=solution_vector,
mask_2d=np.full(
fill_value=False, shape=self.source_pixelization_grid.shape_native
),
sub_size=1,
)
return array_2d.Array2D.manual(
array=recon,
sub_size=1,
pixel_scales=self.source_pixelization_grid.pixel_scales,
origin=self.source_pixelization_grid.origin,
)
class MapperVoronoi(Mapper):
def __init__(
self,
source_grid_slim,
source_pixelization_grid,
data_pixelization_grid=None,
hyper_image=None,
):
"""Class representing a Voronoi mapper, which maps unmasked pixels on a masked 2D array (in the form of \
a grid, see the *hyper_galaxies.array.grid* module) to pixels discretized on a Voronoi grid.
The irand non-uniform geometry of the Voronoi grid means efficient pixel pairings requires knowledge \
of how different grid map to one another.
Parameters
----------
pixels : int
The number of pixels in the Voronoi pixelization.
source_grid_slim : gridStack
A stack of grid describing the observed image's pixel coordinates (e.g. an image-grid, sub-grid, etc.).
voronoi : scipy.spatial.Voronoi
Class storing the Voronoi grid's
geometry : pixelization.Voronoi.Geometry
The geometry (e.g. y / x edge locations, pixel-scales) of the Vornoi pixelization.
hyper_image : np.ndarray
A pre-computed hyper-image of the image the mapper is expected to reconstruct, used for adaptive analysis.
"""
super().__init__(
source_grid_slim=source_grid_slim,
source_pixelization_grid=source_pixelization_grid,
data_pixelization_grid=data_pixelization_grid,
hyper_image=hyper_image,
)
@property
def pixelization_index_for_sub_slim_index(self):
"""
The 1D index mappings between the sub pixels and Voronoi pixelization pixels.
"""
return mapper_util.pixelization_index_for_voronoi_sub_slim_index_from(
grid=self.source_grid_slim,
nearest_pixelization_index_for_slim_index=self.source_pixelization_grid.nearest_pixelization_index_for_slim_index,
slim_index_for_sub_slim_index=self.source_grid_slim.mask._slim_index_for_sub_slim_index,
pixelization_grid=self.source_pixelization_grid,
pixel_neighbors=self.source_pixelization_grid.pixel_neighbors,
pixel_neighbors_size=self.source_pixelization_grid.pixel_neighbors_size,
).astype("int")
@property
def voronoi(self):
return self.source_pixelization_grid.voronoi
def reconstruction_from(self, solution_vector):
return solution_vector
| [
"numpy.full",
"autoarray.inversion.mapper_util.mapping_matrix_from",
"autoarray.inversion.mapper_util.adaptive_pixel_signals_from",
"autoarray.structures.grids.two_d.grid_2d_util.grid_pixel_indexes_2d_slim_from",
"itertools.chain.from_iterable",
"autoarray.inversion.mapper_util.pixelization_index_for_voro... | [((5179, 5511), 'autoarray.inversion.mapper_util.mapping_matrix_from', 'mapper_util.mapping_matrix_from', ([], {'pixelization_index_for_sub_slim_index': 'self.pixelization_index_for_sub_slim_index', 'pixels': 'self.pixels', 'total_mask_pixels': 'self.source_grid_slim.mask.pixels_in_mask', 'slim_index_for_sub_slim_index': 'self._slim_index_for_sub_slim_index', 'sub_fraction': 'self.source_grid_slim.mask.sub_fraction'}), '(pixelization_index_for_sub_slim_index=self.\n pixelization_index_for_sub_slim_index, pixels=self.pixels,\n total_mask_pixels=self.source_grid_slim.mask.pixels_in_mask,\n slim_index_for_sub_slim_index=self._slim_index_for_sub_slim_index,\n sub_fraction=self.source_grid_slim.mask.sub_fraction)\n', (5210, 5511), False, 'from autoarray.inversion import mapper_util\n'), ((6908, 7215), 'autoarray.inversion.mapper_util.adaptive_pixel_signals_from', 'mapper_util.adaptive_pixel_signals_from', ([], {'pixels': 'self.pixels', 'signal_scale': 'signal_scale', 'pixelization_index_for_sub_slim_index': 'self.pixelization_index_for_sub_slim_index', 'slim_index_for_sub_slim_index': 'self.source_grid_slim.mask._slim_index_for_sub_slim_index', 'hyper_image': 'self.hyper_image'}), '(pixels=self.pixels, signal_scale=\n signal_scale, pixelization_index_for_sub_slim_index=self.\n pixelization_index_for_sub_slim_index, slim_index_for_sub_slim_index=\n self.source_grid_slim.mask._slim_index_for_sub_slim_index, hyper_image=\n self.hyper_image)\n', (6947, 7215), False, 'from autoarray.inversion import mapper_util\n'), ((11035, 11195), 'autoarray.structures.arrays.two_d.array_2d.Array2D.manual', 'array_2d.Array2D.manual', ([], {'array': 'recon', 'sub_size': '(1)', 'pixel_scales': 'self.source_pixelization_grid.pixel_scales', 'origin': 'self.source_pixelization_grid.origin'}), '(array=recon, sub_size=1, pixel_scales=self.\n source_pixelization_grid.pixel_scales, origin=self.\n source_pixelization_grid.origin)\n', 
(11058, 11195), False, 'from autoarray.structures.arrays.two_d import array_2d\n'), ((7548, 7642), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[image_for_source[index] for index in pixelization_indexes]'], {}), '([image_for_source[index] for index in\n pixelization_indexes])\n', (7577, 7642), False, 'import itertools\n'), ((10184, 10444), 'autoarray.structures.grids.two_d.grid_2d_util.grid_pixel_indexes_2d_slim_from', 'grid_2d_util.grid_pixel_indexes_2d_slim_from', ([], {'grid_scaled_2d_slim': 'self.source_grid_slim', 'shape_native': 'self.source_pixelization_grid.shape_native', 'pixel_scales': 'self.source_pixelization_grid.pixel_scales', 'origin': 'self.source_pixelization_grid.origin'}), '(grid_scaled_2d_slim=self.\n source_grid_slim, shape_native=self.source_pixelization_grid.\n shape_native, pixel_scales=self.source_pixelization_grid.pixel_scales,\n origin=self.source_pixelization_grid.origin)\n', (10228, 10444), False, 'from autoarray.structures.grids.two_d import grid_2d_util\n'), ((10874, 10949), 'numpy.full', 'np.full', ([], {'fill_value': '(False)', 'shape': 'self.source_pixelization_grid.shape_native'}), '(fill_value=False, shape=self.source_pixelization_grid.shape_native)\n', (10881, 10949), True, 'import numpy as np\n'), ((12966, 13479), 'autoarray.inversion.mapper_util.pixelization_index_for_voronoi_sub_slim_index_from', 'mapper_util.pixelization_index_for_voronoi_sub_slim_index_from', ([], {'grid': 'self.source_grid_slim', 'nearest_pixelization_index_for_slim_index': 'self.source_pixelization_grid.nearest_pixelization_index_for_slim_index', 'slim_index_for_sub_slim_index': 'self.source_grid_slim.mask._slim_index_for_sub_slim_index', 'pixelization_grid': 'self.source_pixelization_grid', 'pixel_neighbors': 'self.source_pixelization_grid.pixel_neighbors', 'pixel_neighbors_size': 'self.source_pixelization_grid.pixel_neighbors_size'}), '(grid=self.\n source_grid_slim, nearest_pixelization_index_for_slim_index=self.\n 
source_pixelization_grid.nearest_pixelization_index_for_slim_index,\n slim_index_for_sub_slim_index=self.source_grid_slim.mask.\n _slim_index_for_sub_slim_index, pixelization_grid=self.\n source_pixelization_grid, pixel_neighbors=self.source_pixelization_grid\n .pixel_neighbors, pixel_neighbors_size=self.source_pixelization_grid.\n pixel_neighbors_size)\n', (13028, 13479), False, 'from autoarray.inversion import mapper_util\n'), ((7886, 7983), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['[image_for_source[index] for index in source_pixel_index_list]'], {}), '([image_for_source[index] for index in\n source_pixel_index_list])\n', (7915, 7983), False, 'import itertools\n')] |
import os.path as op
import numpy as np
import argparse
import matplotlib.pyplot as plt
from pandas import read_csv
from tqdm import tqdm
from mne.io import read_raw_edf, read_raw_bdf
from mne import Annotations, events_from_annotations
def _find_pd_candidates(pd, sfreq, chunk, baseline, zscore, min_i, overlap,
verbose=True):
if verbose:
print('Finding photodiode events')
''' Move in chunks twice as long as your longest photodiode signal
with the first 0.25 as baseline to test whether the signal goes
above/below baseline and back below/above.
The event onset must have a minimum length `min_i` and
`overlap` should be set such that no events will be missed
and if a pd event gets cut off, you'll find it the next chunk'''
chunk_i = int(chunk * sfreq)
baseline_i = int(chunk_i * baseline / 2)
pd_candidates = set()
for i in tqdm(range(baseline_i, len(pd) - chunk_i - baseline_i,
int(chunk_i * overlap))):
b = pd[i - baseline_i:i]
s = (pd[i:i + chunk_i] - np.median(b)) / np.std(b)
for binary_s in [s > zscore, s < -zscore]:
if not binary_s[0]: # must start off
onset = np.where(binary_s)[0]
if onset.size > min_i:
e = onset[0]
offset = np.where(1 - binary_s[e:])[0]
# must have an offset and no more events
if offset.size > 0 and not any(binary_s[e + offset[0]:]):
if not any([i + e + j in pd_candidates for j in
range(-1, 2)]): # off by one error
pd_candidates.add(i + e)
if verbose:
print('{} photodiode candidate events found'.format(
len(pd_candidates)))
return pd_candidates
def _nearest_pd_event(b_event, pd_candidates, max_index):
j = 0
# short circuit for alignments way to far forward
if b_event >= max_index:
return max_index - b_event
while (int(b_event + j) not in
pd_candidates and int(b_event - j) not in
pd_candidates and b_event + j < max_index and b_event - j > 0):
j += 1
return j
def _find_best_alignment(beh_events, pd_candidates, first_pd_alignment_n,
verbose=True):
if verbose:
print('Finding best alignment with behavioral events using the '
'first %i events' % first_pd_alignment_n)
min_error = best_alignment = best_errors = None
max_index = max(pd_candidates)
sorted_pds = np.array(sorted(pd_candidates))
# want first_pd_alignment to be small to avoid drift
for i in tqdm(range(len(pd_candidates) - first_pd_alignment_n)):
these_beh_events = beh_events.copy() + sorted_pds[i]
errors = [_nearest_pd_event(b_event, pd_candidates, max_index) for
b_event in these_beh_events[:first_pd_alignment_n]]
median_error = np.median(errors)
if min_error is None or median_error < min_error:
best_alignment = i
min_error = median_error
best_errors = errors
if verbose:
print('Best alignment starting with photodiode event '
'#%i, min %i, q1 %i, med %i, q3 %i, max %i ' %
(best_alignment, min(best_errors),
np.quantile(best_errors, 0.25), np.median(best_errors),
np.quantile(best_errors, 0.75), max(best_errors)))
if len(sorted_pds) - best_alignment >= len(beh_events): # extra pds at end
diffs = \
[pd_e - beh_e for pd_e, beh_e in
zip(sorted_pds[best_alignment:best_alignment + len(beh_events)],
beh_events)]
else: # extra beh events (missing last 1+ pd events for whatever reason)
diffs = [pd_e - beh_e for pd_e, beh_e in
zip(sorted_pds[best_alignment:],
beh_events[:len(sorted_pds) - best_alignment])]
return np.median(diffs)
def _exclude_ambiguous_events(beh_events, pd_candidates, pd, sfreq, chunk,
best_diff, exclude_shift, verbose=True):
if verbose:
print('Excluding events that have zero close events or more than '
'one photodiode event within `chunk` time')
events = dict()
errors = dict()
chunk_i = int(chunk * sfreq)
max_index = max(pd_candidates)
sorted_pds = np.array(sorted(pd_candidates))
beh_events_aligned = beh_events.copy() + best_diff
for i, b_event in enumerate(beh_events_aligned):
j = _nearest_pd_event(b_event, pd_candidates, max_index)
if j > sfreq * exclude_shift:
if verbose:
print('Excluding event %i off by %i samples, ' % (i, j))
plt.plot(pd[int(b_event - 10 * sfreq):
int(b_event + 10 * sfreq)])
plt.show()
else:
if int(b_event + j) in pd_candidates:
beh_events_aligned += j
events[i] = int(b_event + j)
errors[i] = j
else:
beh_events_aligned -= j
events[i] = int(b_event - j)
errors[i] = -j
pd_events = np.logical_and(sorted_pds < (events[i] + chunk_i),
sorted_pds > (events[i] - chunk_i))
if sum(pd_events) > 1:
events.pop(i)
errors.pop(i)
if verbose:
print('%i events found for behvaior event %i, excluding' %
(sum(pd_events), i))
plt.plot(pd[int(b_event - 10 * sfreq):
int(b_event + 10 * sfreq)])
plt.show()
if verbose:
print(errors)
print('Final behavior event-photodiode event differences '
'min %i, q1 %i, med %i, q3 %i, max %i ' %
(min(errors.values()), np.quantile(list(errors.values()), 0.25),
np.median(list(errors.values())),
np.quantile(list(errors.values()), 0.75), max(errors.values())))
trials = sorted(errors.keys())
plt.plot(trials, [errors[t] for t in trials])
plt.ylabel('Difference (samples)')
plt.xlabel('Trial')
plt.title('Photodiode Events Compared to Behavior Events')
plt.show()
return events
def _add_events_to_raw(raw, events, pd_event_name, relative_events):
onsets = np.array([events[i] for i in sorted(events.keys())])
annot = Annotations(onset=raw.times[onsets],
duration=np.repeat(0.1, len(onsets)),
description=np.repeat(pd_event_name,
len(onsets)))
if relative_events is not None:
for name, beh_array in relative_events.items():
onsets = \
[events[i] + int(np.round(beh_array[i] * raw.info['sfreq']))
for i in sorted(events.keys()) if not np.isnan(beh_array[i])]
annot += Annotations(onset=raw.times[np.array(onsets)],
duration=np.repeat(0.1, len(onsets)),
description=np.repeat(name, len(onsets)))
raw.set_annotations(annot)
return raw
def parse_pd_events(eegf, beh_events, pd_event_name='Fixation', chunk=2,
baseline=0.25, overlap=0.25, exclude_shift=0.1, zscore=10,
min_i=10, first_pd_alignment_n=20, relative_events=None,
overwrite_raw=True, verbose=True):
''' Parses photodiode events from a likely very corrupted channel
using behavioral data to sync events to determine which
behavioral events don't have a match and are thus corrupted
and should be excluded (while ignoring events that look like
photodiode events but don't match behavior)
Parameters
----------
eegf: str
The filepath to the eeg file.
beh_events: np.array | list
The events (in seconds) for the behavior that is matched
to the photodiode.
pd_event_name: str
The name of the event corresponding to the photodiode.
chunk: float
The size of the window to chunk the photodiode events by
should be larger than 2x the longest photodiode event
baseline: float
How much relative to the chunk to use to idenify the time before
the photodiode event.
overlap: float
How much to overlap the windows of the photodiode event-finding
process.
exclude_shift: float
How many seconds different than expected from the behavior events
to exclude that event.
zscore: float
How large of a z-score difference to use to threshold photodiode
events.
min_i: int
The minimum number of samples the photodiode event must be on for.
first_pd_alignment_n : int
How many samples to use to score the alignment to the first event.
This number should be low to have an accurate score without the
photodiode drifting and causing unexpected alignments (and it's
faster).
relative_events: dict | None
Whether to add events with keys of the event name and values
of the list of relative times (in seconds) compared to the
photodiode event.1
Returns
-------
events: DataFrame
A DataFrame that has a column for to the (zero)
indexed behavioral events and another column corresponding
to the time stamp of the eeg file.
'''
if op.splitext(eegf)[-1] == '.edf':
raw = read_raw_edf(eegf, preload=True)
elif op.splitext(eegf)[-1] == '.bdf':
raw = read_raw_bdf(eegf, preload=True)
raw.plot()
plt.show()
pd = None
pds = list()
while pd != '' and len(pds) < 2 and pd not in raw.ch_names:
pd = input('pd%i ch?\t' % len(pds))
if pd not in raw.ch_names:
print('Channel not in raw')
else:
pds.append(pd)
pd = None
if len(pds) == 2:
pd = raw._data[raw.ch_names.index(pds[0])]
pd -= raw._data[raw.ch_names.index(pds[1])]
else:
pd = raw._data[raw.ch_names.index(pds[0])]
pd_candidates = _find_pd_candidates(pd, raw.info['sfreq'], chunk, baseline,
zscore, min_i, overlap, verbose)
first_pd_alignment_n = int(min([len(pd_candidates) / 2,
first_pd_alignment_n]))
beh_events *= raw.info['sfreq']
beh_events -= beh_events[0]
best_diff = _find_best_alignment(beh_events, pd_candidates,
first_pd_alignment_n, verbose)
events = _exclude_ambiguous_events(beh_events, pd_candidates, pd,
raw.info['sfreq'], chunk,
best_diff, exclude_shift, verbose)
raw = _add_events_to_raw(raw, events, pd_event_name, relative_events)
return raw, pds
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--eegf', type=str, required=True,
help='The eeg filepath')
parser.add_argument('--behf', type=str, required=True,
help='The behavioral tsv filepath')
parser.add_argument('--beh_col', type=str, required=False,
default='fix_onset_time',
help='The name of the behavioral column '
'corresponding to the photodiode event timing')
parser.add_argument('--pd_event_name', type=str, required=False,
default='Fixation',
help='The name of the photodiode event')
parser.add_argument('--diff', type=bool, required=False,
default=False, help='Whether the behavior column is '
'an absolute time stamp or a difference (and thus '
'should be cumulatively summed')
parser.add_argument('--chunk', type=float, required=False,
default=2, help='How large to window the '
'photodiode events, should be 2x longest event')
parser.add_argument('--exclude_shift', type=float, required=False,
default=0.1, help='How many seconds off to exclude a '
'photodiode-behavioral event difference')
parser.add_argument('--relative_event_cols', type=str, nargs='*',
required=False,
default=['fix_duration', 'go_time', 'response_time'],
help='A behavioral column in the tsv file that has '
'the time relative to the photodiode events on the '
'same trial as in the `--beh_col event.')
parser.add_argument('--relative_event_names', type=str, nargs='*',
required=False,
default=['ISI Onset', 'Go Cue', 'Response'],
help='The name of the corresponding '
'`--relative_event_cols` events')
args = parser.parse_args()
df = read_csv(args.behf, sep='\t')
beh_events = np.array(df[args.beh_col])
if args.diff:
beh_events = np.array([0] + list(np.cumsum(beh_events)))
if args.relative_event_names:
if len(args.relative_event_cols) != len(args.relative_event_names):
raise ValueError('Mismatched length of relative event behavior '
'file column names and names of the events')
relative_events = [np.array(df[rel_event]) for rel_event in
args.relative_event_cols]
relative_events = {name: rel_events for rel_events, name in
zip(relative_events, args.relative_event_names)}
'''e.g. relative_event_names = ['ISI Onset', 'Go Cue', 'Response']
relative_event_cols = ['fix_duration', 'go_time', 'response_time']
'''
print(relative_events)
else:
relative_events = None
raw, pds = parse_pd_events(args.eegf, beh_events, args.pd_event_name,
chunk=args.chunk,
relative_events=relative_events,
exclude_shift=args.exclude_shift)
events, event_id = events_from_annotations(raw)
np.savetxt(op.splitext(args.eegf)[0] + '_pd_events.tsv', events, fmt='%i',
delimiter='\t')
with open(op.splitext(args.eegf)[0] + '_pd_event_id.tsv', 'w') as f:
for name, e_id in event_id.items():
f.write(name + '\t' + str(e_id) + '\n')
raw.annotations.save(op.splitext(args.eegf)[0] + '_pd-annot.fif')
with open(op.splitext(args.eegf)[0] + '_pd_channels.tsv', 'w') as f:
f.write('\t'.join(pds))
| [
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.isnan",
"numpy.round",
"mne.events_from_annotations",
"numpy.std",
"numpy.cumsum",
"matplotlib.pyplot.show",
"numpy.median",
"mne.io.read_raw_bdf",
"matplotlib.pyplot.ylabel",
"numpy.quantile",
"matplotlib.pypl... | [((4016, 4032), 'numpy.median', 'np.median', (['diffs'], {}), '(diffs)\n', (4025, 4032), True, 'import numpy as np\n'), ((9813, 9823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9821, 9823), True, 'import matplotlib.pyplot as plt\n'), ((11105, 11130), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11128, 11130), False, 'import argparse\n'), ((13226, 13255), 'pandas.read_csv', 'read_csv', (['args.behf'], {'sep': '"""\t"""'}), "(args.behf, sep='\\t')\n", (13234, 13255), False, 'from pandas import read_csv\n'), ((13273, 13299), 'numpy.array', 'np.array', (['df[args.beh_col]'], {}), '(df[args.beh_col])\n', (13281, 13299), True, 'import numpy as np\n'), ((14422, 14450), 'mne.events_from_annotations', 'events_from_annotations', (['raw'], {}), '(raw)\n', (14445, 14450), False, 'from mne import Annotations, events_from_annotations\n'), ((3005, 3022), 'numpy.median', 'np.median', (['errors'], {}), '(errors)\n', (3014, 3022), True, 'import numpy as np\n'), ((6211, 6256), 'matplotlib.pyplot.plot', 'plt.plot', (['trials', '[errors[t] for t in trials]'], {}), '(trials, [errors[t] for t in trials])\n', (6219, 6256), True, 'import matplotlib.pyplot as plt\n'), ((6265, 6299), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Difference (samples)"""'], {}), "('Difference (samples)')\n", (6275, 6299), True, 'import matplotlib.pyplot as plt\n'), ((6308, 6327), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Trial"""'], {}), "('Trial')\n", (6318, 6327), True, 'import matplotlib.pyplot as plt\n'), ((6336, 6394), 'matplotlib.pyplot.title', 'plt.title', (['"""Photodiode Events Compared to Behavior Events"""'], {}), "('Photodiode Events Compared to Behavior Events')\n", (6345, 6394), True, 'import matplotlib.pyplot as plt\n'), ((6403, 6413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6411, 6413), True, 'import matplotlib.pyplot as plt\n'), ((9672, 9704), 'mne.io.read_raw_edf', 'read_raw_edf', (['eegf'], {'preload': 
'(True)'}), '(eegf, preload=True)\n', (9684, 9704), False, 'from mne.io import read_raw_edf, read_raw_bdf\n'), ((1115, 1124), 'numpy.std', 'np.std', (['b'], {}), '(b)\n', (1121, 1124), True, 'import numpy as np\n'), ((5270, 5356), 'numpy.logical_and', 'np.logical_and', (['(sorted_pds < events[i] + chunk_i)', '(sorted_pds > events[i] - chunk_i)'], {}), '(sorted_pds < events[i] + chunk_i, sorted_pds > events[i] -\n chunk_i)\n', (5284, 5356), True, 'import numpy as np\n'), ((9625, 9642), 'os.path.splitext', 'op.splitext', (['eegf'], {}), '(eegf)\n', (9636, 9642), True, 'import os.path as op\n'), ((9761, 9793), 'mne.io.read_raw_bdf', 'read_raw_bdf', (['eegf'], {'preload': '(True)'}), '(eegf, preload=True)\n', (9773, 9793), False, 'from mne.io import read_raw_edf, read_raw_bdf\n'), ((13671, 13694), 'numpy.array', 'np.array', (['df[rel_event]'], {}), '(df[rel_event])\n', (13679, 13694), True, 'import numpy as np\n'), ((1099, 1111), 'numpy.median', 'np.median', (['b'], {}), '(b)\n', (1108, 1111), True, 'import numpy as np\n'), ((4922, 4932), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4930, 4932), True, 'import matplotlib.pyplot as plt\n'), ((9714, 9731), 'os.path.splitext', 'op.splitext', (['eegf'], {}), '(eegf)\n', (9725, 9731), True, 'import os.path as op\n'), ((14466, 14488), 'os.path.splitext', 'op.splitext', (['args.eegf'], {}), '(args.eegf)\n', (14477, 14488), True, 'import os.path as op\n'), ((14755, 14777), 'os.path.splitext', 'op.splitext', (['args.eegf'], {}), '(args.eegf)\n', (14766, 14777), True, 'import os.path as op\n'), ((1250, 1268), 'numpy.where', 'np.where', (['binary_s'], {}), '(binary_s)\n', (1258, 1268), True, 'import numpy as np\n'), ((3386, 3416), 'numpy.quantile', 'np.quantile', (['best_errors', '(0.25)'], {}), '(best_errors, 0.25)\n', (3397, 3416), True, 'import numpy as np\n'), ((3418, 3440), 'numpy.median', 'np.median', (['best_errors'], {}), '(best_errors)\n', (3427, 3440), True, 'import numpy as np\n'), ((3457, 3487), 
'numpy.quantile', 'np.quantile', (['best_errors', '(0.75)'], {}), '(best_errors, 0.75)\n', (3468, 3487), True, 'import numpy as np\n'), ((5784, 5794), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5792, 5794), True, 'import matplotlib.pyplot as plt\n'), ((13359, 13380), 'numpy.cumsum', 'np.cumsum', (['beh_events'], {}), '(beh_events)\n', (13368, 13380), True, 'import numpy as np\n'), ((14575, 14597), 'os.path.splitext', 'op.splitext', (['args.eegf'], {}), '(args.eegf)\n', (14586, 14597), True, 'import os.path as op\n'), ((14814, 14836), 'os.path.splitext', 'op.splitext', (['args.eegf'], {}), '(args.eegf)\n', (14825, 14836), True, 'import os.path as op\n'), ((1373, 1399), 'numpy.where', 'np.where', (['(1 - binary_s[e:])'], {}), '(1 - binary_s[e:])\n', (1381, 1399), True, 'import numpy as np\n'), ((6949, 6991), 'numpy.round', 'np.round', (["(beh_array[i] * raw.info['sfreq'])"], {}), "(beh_array[i] * raw.info['sfreq'])\n", (6957, 6991), True, 'import numpy as np\n'), ((7048, 7070), 'numpy.isnan', 'np.isnan', (['beh_array[i]'], {}), '(beh_array[i])\n', (7056, 7070), True, 'import numpy as np\n'), ((7121, 7137), 'numpy.array', 'np.array', (['onsets'], {}), '(onsets)\n', (7129, 7137), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import tensorflow as tf
import dataset.plots as pl
import dataset.dataloader as dl
tf.random.set_seed(0)
from dataset.coco import cn as cfg

# Configure the dataset for un-normalized BGR images and forced half-body
# augmentation so the augmentation path is always exercised visually.
cfg.DATASET.INPUT_SHAPE = [512, 384, 3]
cfg.DATASET.NORM = False
cfg.DATASET.BGR = True
cfg.DATASET.HALF_BODY_PROB = 1.

ds = dl.load_tfds(cfg, 'val', det=False, predict_kp=True, drop_remainder=False, visualize=True)

# Walk every batch and display samples interactively (press a key to advance).
for ids, imgs, kps, Ms, scores, hms, valids in ds:
    for i in range(cfg.TRAIN.BATCH_SIZE):
        kp = kps[i]
        # Only show samples with at least one labeled keypoint beyond the
        # first 17 (presumably the extended/foot keypoints — confirm against
        # the dataset definition).
        if np.sum(kp[:, 2][17:]) > 0:
            img = imgs[i]
            pl.plot_image(np.uint8(img), hms[i], kp[:, -1].numpy())
            cv2.imshow('', dl.visualize(np.uint8(img), kp[:, :2].numpy(), kp[:, -1].numpy()))
            cv2.waitKey()
cv2.destroyAllWindows()
| [
"tensorflow.random.set_seed",
"numpy.uint8",
"numpy.sum",
"cv2.waitKey",
"cv2.destroyAllWindows",
"dataset.dataloader.load_tfds"
] | [((115, 136), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (133, 136), True, 'import tensorflow as tf\n'), ((299, 393), 'dataset.dataloader.load_tfds', 'dl.load_tfds', (['cfg', '"""val"""'], {'det': '(False)', 'predict_kp': '(True)', 'drop_remainder': '(False)', 'visualize': '(True)'}), "(cfg, 'val', det=False, predict_kp=True, drop_remainder=False,\n visualize=True)\n", (311, 393), True, 'import dataset.dataloader as dl\n'), ((549, 570), 'numpy.sum', 'np.sum', (['kp[:, 2][17:]'], {}), '(kp[:, 2][17:])\n', (555, 570), True, 'import numpy as np\n'), ((775, 788), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (786, 788), False, 'import cv2\n'), ((801, 824), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (822, 824), False, 'import cv2\n'), ((627, 640), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (635, 640), True, 'import numpy as np\n'), ((709, 722), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (717, 722), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import argparse
import base64
import io
import logging
import os
import sys

import h5py
import numpy as np
import PIL.Image

try:
    from io import StringIO
except ImportError:
    from io import StringIO
# Add path for DIGITS package
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
import digits.config # noqa
from digits import utils, log # noqa
from digits.inference.errors import InferenceError # noqa
from digits.job import Job # noqa
from digits.utils.lmdbreader import DbReader # noqa
# Import digits.config before caffe to set the path
import caffe_pb2 # noqa
logger = logging.getLogger('digits.tools.inference')
"""
Perform inference on a list of images using the specified model
"""
def infer(input_list,
          output_dir,
          jobs_dir,
          model_id,
          epoch,
          batch_size,
          layers,
          gpu,
          input_is_db,
          resize):
    """
    Perform inference on a list of images using the specified model.

    Parameters
    ----------
    input_list : str
        Path to a text file listing image paths, or to a database when
        ``input_is_db`` is True.
    output_dir : str
        Directory the ``inference.hdf5`` results file is written to.
    jobs_dir : str
        DIGITS jobs directory ('none' selects the configured default).
    model_id : str
        ID of the model job to run inference with.
    epoch : str or float
        Training epoch of the snapshot to use (-1 selects the last one).
    batch_size : int
        Currently unused in this function; kept for interface compatibility.
    layers : str
        'all' to also export layer visualizations, 'none' otherwise.
    gpu : int or None
        GPU id to use (None for default/CPU).
    input_is_db : bool
        Whether ``input_list`` names a database instead of a file list.
    resize : bool
        Whether images should be resized to the dataset dimensions.

    Raises
    ------
    InferenceError
        If no usable snapshot is found, no input sample could be loaded, or
        layer visualization is requested for multi-image inference.
    """
    # job directory defaults to that defined in DIGITS config
    if jobs_dir == 'none':
        jobs_dir = digits.config.config_value('jobs_dir')

    # load model job
    model_dir = os.path.join(jobs_dir, model_id)
    assert os.path.isdir(model_dir), "Model dir %s does not exist" % model_dir
    model = Job.load(model_dir)

    # load dataset job and attach it to every task of the model
    dataset_dir = os.path.join(jobs_dir, model.dataset_id)
    assert os.path.isdir(dataset_dir), "Dataset dir %s does not exist" % dataset_dir
    dataset = Job.load(dataset_dir)
    for task in model.tasks:
        task.dataset = dataset

    # retrieve snapshot file
    task = model.train_task()
    snapshot_filename = None
    epoch = float(epoch)
    if epoch == -1 and len(task.snapshots):
        # use last epoch
        epoch = task.snapshots[-1][1]
        snapshot_filename = task.snapshots[-1][0]
    else:
        for f, e in task.snapshots:
            if e == epoch:
                snapshot_filename = f
                break
    if not snapshot_filename:
        raise InferenceError("Unable to find snapshot for epoch=%s" % repr(epoch))

    # retrieve image dimensions and resize mode
    image_dims = dataset.get_feature_dims()
    height = image_dims[0]
    width = image_dims[1]
    channels = image_dims[2]
    resize_mode = dataset.resize_mode if hasattr(dataset, 'resize_mode') else 'squash'

    n_input_samples = 0  # number of samples we were able to load
    input_ids = []       # indices of samples within file list
    input_data = []      # sample data

    if input_is_db:
        # load images from database
        reader = DbReader(input_list)
        for key, value in reader.entries():
            datum = caffe_pb2.Datum()
            datum.ParseFromString(value)
            if datum.encoded:
                # datum.data is raw bytes, so decode through a binary
                # buffer (the old StringIO-based code fails on Python 3)
                s = io.BytesIO(datum.data)
                img = np.array(PIL.Image.open(s))
            else:
                import caffe.io
                arr = caffe.io.datum_to_array(datum)
                # CHW -> HWC
                arr = arr.transpose((1, 2, 0))
                if arr.shape[2] == 1:
                    # HWC -> HW
                    arr = arr[:, :, 0]
                elif arr.shape[2] == 3:
                    # BGR -> RGB
                    # XXX see issue #59
                    arr = arr[:, :, [2, 1, 0]]
                img = arr
            input_ids.append(key)
            input_data.append(img)
            n_input_samples = n_input_samples + 1
    else:
        # load paths from file
        with open(input_list) as infile:
            paths = infile.readlines()
        # load and resize images; unreadable images are skipped with a warning
        for idx, path in enumerate(paths):
            path = path.strip()
            try:
                image = utils.image.load_image(path.strip())
                if resize:
                    image = utils.image.resize_image(
                        image,
                        height,
                        width,
                        channels=channels,
                        resize_mode=resize_mode)
                else:
                    image = utils.image.image_to_array(
                        image,
                        channels=channels)
                input_ids.append(idx)
                input_data.append(image)
                n_input_samples = n_input_samples + 1
            except utils.errors.LoadImageError as e:
                print(e)

    # perform inference
    visualizations = None
    if n_input_samples == 0:
        raise InferenceError("Unable to load any image from file '%s'" % repr(input_list))
    elif n_input_samples == 1:
        # single image inference (the only mode that supports layer
        # visualizations)
        outputs, visualizations = model.train_task().infer_one(
            input_data[0],
            snapshot_epoch=epoch,
            layers=layers,
            gpu=gpu,
            resize=resize)
    else:
        if layers != 'none':
            raise InferenceError("Layer visualization is not supported for multiple inference")
        outputs = model.train_task().infer_many(
            input_data,
            snapshot_epoch=epoch,
            gpu=gpu,
            resize=resize)

    # write to hdf5 file
    db_path = os.path.join(output_dir, 'inference.hdf5')
    db = h5py.File(db_path, 'w')

    # write input paths and images to database
    db.create_dataset("input_ids", data=input_ids)
    db.create_dataset("input_data", data=input_data)

    # write outputs to database
    db_outputs = db.create_group("outputs")
    for output_id, output_name in enumerate(outputs.keys()):
        output_data = outputs[output_name]
        # urlsafe_b64encode requires bytes on Python 3; decode the result so
        # the HDF5 dataset name is a native string
        output_key = base64.urlsafe_b64encode(str(output_name).encode('utf-8')).decode('ascii')
        dset = db_outputs.create_dataset(output_key, data=output_data)
        # add ID attribute so outputs can be sorted in
        # the order they appear in here
        dset.attrs['id'] = output_id

    # write visualization data
    if visualizations is not None and len(visualizations) > 0:
        db_layers = db.create_group("layers")
        for idx, layer in enumerate(visualizations):
            vis = layer['vis'] if layer['vis'] is not None else np.empty(0)
            dset = db_layers.create_dataset(str(idx), data=vis)
            dset.attrs['name'] = layer['name']
            dset.attrs['vis_type'] = layer['vis_type']
            if 'param_count' in layer:
                dset.attrs['param_count'] = layer['param_count']
            if 'layer_type' in layer:
                dset.attrs['layer_type'] = layer['layer_type']
            dset.attrs['shape'] = layer['data_stats']['shape']
            dset.attrs['mean'] = layer['data_stats']['mean']
            dset.attrs['stddev'] = layer['data_stats']['stddev']
            dset.attrs['histogram_y'] = layer['data_stats']['histogram'][0]
            dset.attrs['histogram_x'] = layer['data_stats']['histogram'][1]
            dset.attrs['histogram_ticks'] = layer['data_stats']['histogram'][2]
    db.close()
    logger.info('Saved data to %s', db_path)
if __name__ == '__main__':
    # Command-line entry point: parse arguments and run inference once.
    parser = argparse.ArgumentParser(description='Inference tool - DIGITS')

    # Positional arguments
    parser.add_argument(
        'input_list',
        help='An input file containing paths to input data')
    parser.add_argument(
        'output_dir',
        help='Directory to write outputs to')
    parser.add_argument(
        'model',
        help='Model ID')

    # Optional arguments
    parser.add_argument(
        '-e',
        '--epoch',
        default='-1',
        help="Epoch (-1 for last)"
    )
    parser.add_argument(
        '-j',
        '--jobs_dir',
        default='none',
        help='Jobs directory (default: from DIGITS config)',
    )
    parser.add_argument(
        '-l',
        '--layers',
        default='none',
        help='Which layers to write to output ("none" [default] or "all")',
    )
    parser.add_argument(
        '-b',
        '--batch_size',
        type=int,
        default=1,
        help='Batch size',
    )
    parser.add_argument(
        '-g',
        '--gpu',
        type=int,
        default=None,
        help='GPU to use (as in nvidia-smi output, default: None)',
    )
    parser.add_argument(
        '--db',
        action='store_true',
        help='Input file is a database',
    )
    parser.add_argument(
        '--resize',
        dest='resize',
        action='store_true')
    parser.add_argument(
        '--no-resize',
        dest='resize',
        action='store_false')
    parser.set_defaults(resize=True)

    args = vars(parser.parse_args())

    try:
        infer(
            args['input_list'],
            args['output_dir'],
            args['jobs_dir'],
            args['model'],
            args['epoch'],
            args['batch_size'],
            args['layers'],
            args['gpu'],
            args['db'],
            args['resize']
        )
    except Exception as e:
        # Log the failure before re-raising. Note: exceptions have no
        # ``.message`` attribute in Python 3 — format the exception itself.
        logger.error('%s: %s' % (type(e).__name__, e))
        raise
| [
"digits.utils.image.image_to_array",
"h5py.File",
"os.path.abspath",
"io.StringIO",
"argparse.ArgumentParser",
"digits.job.Job.load",
"os.path.isdir",
"caffe_pb2.Datum",
"numpy.empty",
"digits.inference.errors.InferenceError",
"digits.utils.image.resize_image",
"digits.utils.lmdbreader.DbReade... | [((718, 761), 'logging.getLogger', 'logging.getLogger', (['"""digits.tools.inference"""'], {}), "('digits.tools.inference')\n", (735, 761), False, 'import logging\n'), ((1305, 1337), 'os.path.join', 'os.path.join', (['jobs_dir', 'model_id'], {}), '(jobs_dir, model_id)\n', (1317, 1337), False, 'import os\n'), ((1349, 1373), 'os.path.isdir', 'os.path.isdir', (['model_dir'], {}), '(model_dir)\n', (1362, 1373), False, 'import os\n'), ((1429, 1448), 'digits.job.Job.load', 'Job.load', (['model_dir'], {}), '(model_dir)\n', (1437, 1448), False, 'from digits.job import Job\n'), ((1491, 1531), 'os.path.join', 'os.path.join', (['jobs_dir', 'model.dataset_id'], {}), '(jobs_dir, model.dataset_id)\n', (1503, 1531), False, 'import os\n'), ((1543, 1569), 'os.path.isdir', 'os.path.isdir', (['dataset_dir'], {}), '(dataset_dir)\n', (1556, 1569), False, 'import os\n'), ((1631, 1652), 'digits.job.Job.load', 'Job.load', (['dataset_dir'], {}), '(dataset_dir)\n', (1639, 1652), False, 'from digits.job import Job\n'), ((5395, 5437), 'os.path.join', 'os.path.join', (['output_dir', '"""inference.hdf5"""'], {}), "(output_dir, 'inference.hdf5')\n", (5407, 5437), False, 'import os\n'), ((5447, 5470), 'h5py.File', 'h5py.File', (['db_path', '"""w"""'], {}), "(db_path, 'w')\n", (5456, 5470), False, 'import h5py\n'), ((7235, 7297), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Inference tool - DIGITS"""'}), "(description='Inference tool - DIGITS')\n", (7258, 7297), False, 'import argparse\n'), ((2735, 2755), 'digits.utils.lmdbreader.DbReader', 'DbReader', (['input_list'], {}), '(input_list)\n', (2743, 2755), False, 'from digits.utils.lmdbreader import DbReader\n'), ((2820, 2837), 'caffe_pb2.Datum', 'caffe_pb2.Datum', ([], {}), '()\n', (2835, 2837), False, 'import caffe_pb2\n'), ((386, 411), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (401, 411), False, 'import os\n'), ((2929, 2939), 'io.StringIO', 
'StringIO', ([], {}), '()\n', (2937, 2939), False, 'from io import StringIO\n'), ((3064, 3077), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3072, 3077), True, 'import numpy as np\n'), ((5122, 5199), 'digits.inference.errors.InferenceError', 'InferenceError', (['"""Layer visualization is not supported for multiple inference"""'], {}), "('Layer visualization is not supported for multiple inference')\n", (5136, 5199), False, 'from digits.inference.errors import InferenceError\n'), ((6329, 6340), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (6337, 6340), True, 'import numpy as np\n'), ((4054, 4148), 'digits.utils.image.resize_image', 'utils.image.resize_image', (['image', 'height', 'width'], {'channels': 'channels', 'resize_mode': 'resize_mode'}), '(image, height, width, channels=channels,\n resize_mode=resize_mode)\n', (4078, 4148), False, 'from digits import utils, log\n'), ((4316, 4368), 'digits.utils.image.image_to_array', 'utils.image.image_to_array', (['image'], {'channels': 'channels'}), '(image, channels=channels)\n', (4342, 4368), False, 'from digits import utils, log\n')] |
from __future__ import division
from __future__ import print_function
import time
import os
import warnings
# Train on CPU (hide GPU) due to memory constraints
os.environ['CUDA_VISIBLE_DEVICES'] = ""
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
import numpy as np
import scipy.sparse as sp
from optimizer import OptimizerAE, OptimizerVAE
from input_data import load_data
from model import GCNModelAE, GCNModelVAE
from preprocessing import preprocess_graph, sparse_to_tuple, gen_train_val_test_sets
from train import train_test_model
from outputs import save_adj
# Settings
# All run-time configuration is exposed through TF1 command-line flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('verbose', 1, 'Verbosity of output from low (0) to high (1)')
flags.DEFINE_float('learning_rate', 0.00001, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 1000, 'Number of max epochs to train.')
flags.DEFINE_integer('hidden1', 64, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 48, 'Number of units in hidden layer 2.')
flags.DEFINE_float('weight_decay', 0., 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')
flags.DEFINE_integer('early_stopping', 5, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_string('model', 'gcn_ae', 'Model string.')
flags.DEFINE_float('ratio_val', 0.2, 'Ratio of edges used for validation metrics.')
flags.DEFINE_float('ratio_test', 0.1, 'Ratio of edges used for test metrics.')
flags.DEFINE_integer('balanced_metrics', 1, 'Whether to use balanced metrics (1) or not (0).')
flags.DEFINE_string('dataset', 'gasch_GSE102475', 'Dataset file name.')
flags.DEFINE_string('ground_truth', 'yeast_chipunion_KDUnion_intersect', 'Gold standard edges file name.')
flags.DEFINE_string('inFilePath', None, 'Input Files path')
flags.DEFINE_string('outFilePath', None, 'Output Files path')
flags.DEFINE_integer('features', 1, 'Whether to use features (1) or not (0).')
flags.DEFINE_integer('random_prior', 0, 'When prior adjacency matrix should be set to random matrix (1) or not (0).')
flags.DEFINE_integer('crossvalidation', 0, 'Whether to use crossvalidation (1) or not (0).')
flags.DEFINE_integer('hp_optimization', 0, 'Whether to start the hyperparameter optimization run (1) or not (0).')
model_str = FLAGS.model
# Timestamped run id used to name logs, plots and output files for this run.
model_timestamp = time.strftime("%Y%m%d_%H%M%S") + '_' + FLAGS.dataset + '_' + FLAGS.ground_truth
if FLAGS.verbose == 0:
    warnings.filterwarnings("ignore")
# Make sure the expected log directory tree exists before anything writes to it.
if not os.path.exists('logs'):
    os.mkdir('logs')
if not os.path.exists('logs/outputs'):
    os.mkdir('logs/outputs')
if not os.path.exists('logs/training_plots'):
    os.mkdir('logs/training_plots')
# Load data
# Input locations: either the packaged data/ layout (default) or an
# explicit directory given via --inFilePath.
if FLAGS.inFilePath is None:
    norm_expression_path = 'data/normalized_expression/' + FLAGS.dataset + '.csv'
    gold_standard_path = 'data/gold_standards/' + FLAGS.ground_truth + '.txt'
else:
    norm_expression_path = FLAGS.inFilePath + 'ExpressionData' + '.csv'
    gold_standard_path = FLAGS.inFilePath + 'PriorNetwork' + '.txt'
adj, features, gene_names = load_data(norm_expression_path, gold_standard_path, model_timestamp, FLAGS.random_prior)
# Store original adjacency matrix (without diagonal entries) for later
adj_orig = adj
adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
adj_orig.eliminate_zeros()
# Split edges into train / crossval / test sets; training uses adj_train only.
adj_train, crossval_edges, test_edges, test_edges_false = gen_train_val_test_sets(adj_orig, FLAGS.crossvalidation, FLAGS.balanced_metrics, FLAGS.ratio_val, FLAGS.ratio_test)
adj = adj_train
if FLAGS.features == 0:
    print("Running without features")
    features = sp.identity(features.shape[0])  # featureless
# Some preprocessing
# adj_norm: normalized adjacency fed to the GCN; adj_label: adjacency with
# self-loops used as the reconstruction target.
adj_norm = [preprocess_graph(m) for m in adj]
adj_label = [(m + sp.eye(m.shape[0])) for m in adj_train]
#np.savetxt('logs/outputs/' + model_timestamp + '_adj_train.csv', adj_label[-1].toarray(), delimiter=";")
adj_label = [sparse_to_tuple(m) for m in adj_label]
features = sparse_to_tuple(features.tocoo())
def build_tf_graph(model_str, features, adj):
    """Assemble the TF1 graph (placeholders, model, optimizer) for one run.

    Args:
        model_str: 'gcn_ae' (plain autoencoder) or 'gcn_vae' (variational).
        features: node features in sparse-tuple form (coords, values, shape).
        adj: list of adjacency matrices; the first entry sizes the graph and
            derives the loss weighting.

    Returns:
        Tuple (placeholders, model, opt) ready for a tf.Session training loop.

    NOTE(review): an unknown model_str leaves ``model`` as None and crashes
    at ``model.reconstructions`` below — callers must pass a valid name.
    """
    # Define placeholders
    placeholders = {
        'features': tf.compat.v1.sparse_placeholder(tf.float32),
        'adj': tf.compat.v1.sparse_placeholder(tf.float32),
        'adj_orig': tf.compat.v1.sparse_placeholder(tf.float32),
        'dropout': tf.compat.v1.placeholder_with_default(0., shape=())
    }
    # Sparse-tuple layout: [1] holds the nonzero values, [2] the dense shape.
    num_features = features[2][1]
    features_nonzero = features[1].shape[0]
    num_nodes = adj[0].shape[0]
    # Create model
    model = None
    if model_str == 'gcn_ae':
        model = GCNModelAE(placeholders, num_features, features_nonzero)
    elif model_str == 'gcn_vae':
        model = GCNModelVAE(placeholders, num_features, num_nodes, features_nonzero)
    # pos_weight re-weights positive entries by the negative/positive ratio
    # (assumes a binary adjacency); norm rescales the overall loss.
    pos_weight = float(adj[0].shape[0] * adj[0].shape[0] - adj[0].sum()) / adj[0].sum()
    norm = adj[0].shape[0] * adj[0].shape[0] / float((adj[0].shape[0] * adj[0].shape[0] - adj[0].sum()) * 2)
    # Optimizer
    with tf.name_scope('optimizer'):
        if model_str == 'gcn_ae':
            opt = OptimizerAE(preds=model.reconstructions,
                              labels=tf.reshape(tf.sparse.to_dense(placeholders['adj_orig'],
                                                            validate_indices=False), [-1]),
                              pos_weight=pos_weight,
                              norm=norm)
        elif model_str == 'gcn_vae':
            opt = OptimizerVAE(preds=model.reconstructions,
                               labels=tf.reshape(tf.sparse.to_dense(placeholders['adj_orig'],
                                                             validate_indices=False), [-1]),
                               model=model, num_nodes=num_nodes,
                               pos_weight=pos_weight,
                               norm=norm)
    return placeholders, model, opt
#Build, train and test model
adj_pred = None
if FLAGS.hp_optimization:
    if not os.path.exists('logs/hparam_tuning'):
        os.mkdir('logs/hparam_tuning')
    #Hyperparameter Optimization
    # Grid search over layer-1 size, layer-2 ratio and learning rate.
    HP_NUM_UNITS1 = hp.HParam('num_units1', hp.Discrete([2, 5, 8, 12, 16, 32, 64, 128]))
    HP_RATIO_UNITS2 = hp.HParam('ratio_units2', hp.Discrete([0.1, 0.25, 0.4, 0.65, 0.8]))
    HP_LR = hp.HParam('lr', hp.Discrete([0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1]))
    session_num = 0
    for num_units1 in HP_NUM_UNITS1.domain.values:
        for ratio_units2 in HP_RATIO_UNITS2.domain.values:
            for lr in HP_LR.domain.values:
                hparams = {
                    HP_NUM_UNITS1: num_units1,
                    HP_RATIO_UNITS2: ratio_units2,
                    HP_LR: lr,
                }
                # FLAGS are mutated in place so downstream code sees the
                # current trial's hyperparameters.
                FLAGS.learning_rate = hparams[HP_LR]
                FLAGS.hidden1 = hparams[HP_NUM_UNITS1]
                FLAGS.hidden2 = int(np.ceil(hparams[HP_RATIO_UNITS2]*hparams[HP_NUM_UNITS1]))
                run_name = "run" + str(session_num) + "_" + model_str + "_hid1-" + str(FLAGS.hidden1) + "_hid2-" + str(FLAGS.hidden2) + "_lr-" + str(FLAGS.learning_rate)
                print('--- Starting trial %d' % session_num)
                print({h.name: hparams[h] for h in hparams})
                # Each trial gets a fresh TF1 graph.
                tf.compat.v1.reset_default_graph()
                placeholders, model, opt = build_tf_graph(model_str, features, adj)
                acc, ap, roc, adj_pred = train_test_model(adj_norm, adj_label, features, adj_orig, FLAGS, crossval_edges,
                                                          placeholders, opt, model, model_str, (model_timestamp + '_' + run_name),
                                                          adj, test_edges, test_edges_false)
                session_num += 1
    #Save output adj matrix and gene interaction list
    # NOTE(review): only the LAST trial's adj_pred is saved here, not the
    # best-scoring one — confirm this is intended.
    save_adj(adj_pred, FLAGS.outFilePath, model_timestamp, gene_names)
else:
    #Run model with given hyperparameters
    placeholders, model, opt = build_tf_graph(model_str, features, adj)
    model_timestamp = model_timestamp + "_" + model_str + "_hid1-" + str(FLAGS.hidden1) + "_hid2-" + str(FLAGS.hidden2) + "_lr-" + str(FLAGS.learning_rate)
    _, _, _, adj_pred = train_test_model(adj_norm, adj_label, features, adj_orig, FLAGS, crossval_edges,
                                         placeholders, opt, model, model_str, model_timestamp,
                                         adj, test_edges, test_edges_false)
    #Save output adj matrix and gene interaction list
    save_adj(adj_pred, FLAGS.outFilePath, model_timestamp, gene_names)
| [
"os.mkdir",
"input_data.load_data",
"time.strftime",
"tensorflow.compat.v1.placeholder_with_default",
"scipy.sparse.eye",
"outputs.save_adj",
"preprocessing.preprocess_graph",
"tensorflow.sparse.to_dense",
"os.path.exists",
"train.train_test_model",
"scipy.sparse.identity",
"model.GCNModelAE",... | [((3056, 3149), 'input_data.load_data', 'load_data', (['norm_expression_path', 'gold_standard_path', 'model_timestamp', 'FLAGS.random_prior'], {}), '(norm_expression_path, gold_standard_path, model_timestamp, FLAGS.\n random_prior)\n', (3065, 3149), False, 'from input_data import load_data\n'), ((3419, 3539), 'preprocessing.gen_train_val_test_sets', 'gen_train_val_test_sets', (['adj_orig', 'FLAGS.crossvalidation', 'FLAGS.balanced_metrics', 'FLAGS.ratio_val', 'FLAGS.ratio_test'], {}), '(adj_orig, FLAGS.crossvalidation, FLAGS.\n balanced_metrics, FLAGS.ratio_val, FLAGS.ratio_test)\n', (3442, 3539), False, 'from preprocessing import preprocess_graph, sparse_to_tuple, gen_train_val_test_sets\n'), ((2442, 2475), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (2465, 2475), False, 'import warnings\n'), ((2484, 2506), 'os.path.exists', 'os.path.exists', (['"""logs"""'], {}), "('logs')\n", (2498, 2506), False, 'import os\n'), ((2512, 2528), 'os.mkdir', 'os.mkdir', (['"""logs"""'], {}), "('logs')\n", (2520, 2528), False, 'import os\n'), ((2536, 2566), 'os.path.exists', 'os.path.exists', (['"""logs/outputs"""'], {}), "('logs/outputs')\n", (2550, 2566), False, 'import os\n'), ((2572, 2596), 'os.mkdir', 'os.mkdir', (['"""logs/outputs"""'], {}), "('logs/outputs')\n", (2580, 2596), False, 'import os\n'), ((2604, 2641), 'os.path.exists', 'os.path.exists', (['"""logs/training_plots"""'], {}), "('logs/training_plots')\n", (2618, 2641), False, 'import os\n'), ((2647, 2678), 'os.mkdir', 'os.mkdir', (['"""logs/training_plots"""'], {}), "('logs/training_plots')\n", (2655, 2678), False, 'import os\n'), ((3629, 3659), 'scipy.sparse.identity', 'sp.identity', (['features.shape[0]'], {}), '(features.shape[0])\n', (3640, 3659), True, 'import scipy.sparse as sp\n'), ((3709, 3728), 'preprocessing.preprocess_graph', 'preprocess_graph', (['m'], {}), '(m)\n', (3725, 3728), False, 'from preprocessing import preprocess_graph, 
sparse_to_tuple, gen_train_val_test_sets\n'), ((3921, 3939), 'preprocessing.sparse_to_tuple', 'sparse_to_tuple', (['m'], {}), '(m)\n', (3936, 3939), False, 'from preprocessing import preprocess_graph, sparse_to_tuple, gen_train_val_test_sets\n'), ((8069, 8246), 'train.train_test_model', 'train_test_model', (['adj_norm', 'adj_label', 'features', 'adj_orig', 'FLAGS', 'crossval_edges', 'placeholders', 'opt', 'model', 'model_str', 'model_timestamp', 'adj', 'test_edges', 'test_edges_false'], {}), '(adj_norm, adj_label, features, adj_orig, FLAGS,\n crossval_edges, placeholders, opt, model, model_str, model_timestamp,\n adj, test_edges, test_edges_false)\n', (8085, 8246), False, 'from train import train_test_model\n'), ((8368, 8434), 'outputs.save_adj', 'save_adj', (['adj_pred', 'FLAGS.outFilePath', 'model_timestamp', 'gene_names'], {}), '(adj_pred, FLAGS.outFilePath, model_timestamp, gene_names)\n', (8376, 8434), False, 'from outputs import save_adj\n'), ((3762, 3780), 'scipy.sparse.eye', 'sp.eye', (['m.shape[0]'], {}), '(m.shape[0])\n', (3768, 3780), True, 'import scipy.sparse as sp\n'), ((4120, 4163), 'tensorflow.compat.v1.sparse_placeholder', 'tf.compat.v1.sparse_placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4151, 4163), True, 'import tensorflow as tf\n'), ((4180, 4223), 'tensorflow.compat.v1.sparse_placeholder', 'tf.compat.v1.sparse_placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4211, 4223), True, 'import tensorflow as tf\n'), ((4245, 4288), 'tensorflow.compat.v1.sparse_placeholder', 'tf.compat.v1.sparse_placeholder', (['tf.float32'], {}), '(tf.float32)\n', (4276, 4288), True, 'import tensorflow as tf\n'), ((4309, 4361), 'tensorflow.compat.v1.placeholder_with_default', 'tf.compat.v1.placeholder_with_default', (['(0.0)'], {'shape': '()'}), '(0.0, shape=())\n', (4346, 4361), True, 'import tensorflow as tf\n'), ((4561, 4617), 'model.GCNModelAE', 'GCNModelAE', (['placeholders', 'num_features', 'features_nonzero'], {}), '(placeholders, num_features, 
features_nonzero)\n', (4571, 4617), False, 'from model import GCNModelAE, GCNModelVAE\n'), ((4960, 4986), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (4973, 4986), True, 'import tensorflow as tf\n'), ((5954, 5990), 'os.path.exists', 'os.path.exists', (['"""logs/hparam_tuning"""'], {}), "('logs/hparam_tuning')\n", (5968, 5990), False, 'import os\n'), ((6000, 6030), 'os.mkdir', 'os.mkdir', (['"""logs/hparam_tuning"""'], {}), "('logs/hparam_tuning')\n", (6008, 6030), False, 'import os\n'), ((6117, 6160), 'tensorboard.plugins.hparams.api.Discrete', 'hp.Discrete', (['[2, 5, 8, 12, 16, 32, 64, 128]'], {}), '([2, 5, 8, 12, 16, 32, 64, 128])\n', (6128, 6160), True, 'from tensorboard.plugins.hparams import api as hp\n'), ((6210, 6250), 'tensorboard.plugins.hparams.api.Discrete', 'hp.Discrete', (['[0.1, 0.25, 0.4, 0.65, 0.8]'], {}), '([0.1, 0.25, 0.4, 0.65, 0.8])\n', (6221, 6250), True, 'from tensorboard.plugins.hparams import api as hp\n'), ((6280, 6337), 'tensorboard.plugins.hparams.api.Discrete', 'hp.Discrete', (['[0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1]'], {}), '([0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1])\n', (6291, 6337), True, 'from tensorboard.plugins.hparams import api as hp\n'), ((4667, 4735), 'model.GCNModelVAE', 'GCNModelVAE', (['placeholders', 'num_features', 'num_nodes', 'features_nonzero'], {}), '(placeholders, num_features, num_nodes, features_nonzero)\n', (4678, 4735), False, 'from model import GCNModelAE, GCNModelVAE\n'), ((2334, 2364), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H%M%S"""'], {}), "('%Y%m%d_%H%M%S')\n", (2347, 2364), False, 'import time\n'), ((7158, 7192), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (7190, 7192), True, 'import tensorflow as tf\n'), ((7310, 7504), 'train.train_test_model', 'train_test_model', (['adj_norm', 'adj_label', 'features', 'adj_orig', 'FLAGS', 'crossval_edges', 'placeholders', 'opt', 'model', 'model_str', 
"(model_timestamp + '_' + run_name)", 'adj', 'test_edges', 'test_edges_false'], {}), "(adj_norm, adj_label, features, adj_orig, FLAGS,\n crossval_edges, placeholders, opt, model, model_str, model_timestamp +\n '_' + run_name, adj, test_edges, test_edges_false)\n", (7326, 7504), False, 'from train import train_test_model\n'), ((7695, 7761), 'outputs.save_adj', 'save_adj', (['adj_pred', 'FLAGS.outFilePath', 'model_timestamp', 'gene_names'], {}), '(adj_pred, FLAGS.outFilePath, model_timestamp, gene_names)\n', (7703, 7761), False, 'from outputs import save_adj\n'), ((6795, 6853), 'numpy.ceil', 'np.ceil', (['(hparams[HP_RATIO_UNITS2] * hparams[HP_NUM_UNITS1])'], {}), '(hparams[HP_RATIO_UNITS2] * hparams[HP_NUM_UNITS1])\n', (6802, 6853), True, 'import numpy as np\n'), ((5129, 5197), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (["placeholders['adj_orig']"], {'validate_indices': '(False)'}), "(placeholders['adj_orig'], validate_indices=False)\n", (5147, 5197), True, 'import tensorflow as tf\n'), ((5520, 5588), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (["placeholders['adj_orig']"], {'validate_indices': '(False)'}), "(placeholders['adj_orig'], validate_indices=False)\n", (5538, 5588), True, 'import tensorflow as tf\n')] |
"""Testing for Spectral Clustering methods"""
from cPickle import dumps, loads
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances, adjusted_rand_score
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import LabelBinarizer
def test_spectral_clustering():
    """Check SpectralClustering on a small hand-built affinity matrix.

    Also verifies the fitted estimator survives a pickle round-trip.
    """
    # Local import: the module-level ``cPickle`` import is Python 2 only;
    # ``pickle`` is the Python 3 (and 2-compatible) stdlib name.
    from pickle import dumps, loads
    S = np.array([[1, 5, 2, 1, 0, 0, 0],
                  [5, 1, 3, 1, 0, 0, 0],
                  [2, 3, 1, 1, 0, 0, 0],
                  [1, 1, 1, 1, 2, 1, 1],
                  [0, 0, 0, 2, 2, 3, 2],
                  [0, 0, 0, 1, 3, 1, 4],
                  [0, 0, 0, 1, 2, 4, 1],
                  ])

    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Cluster ids are arbitrary: normalize so sample 0 is in
                # cluster 1 before comparing.
                if labels[0] == 0:
                    labels = 1 - labels

                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])

                # Pickle round-trip must preserve the fitted state.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.random_state.get_state()[1],
                                   model.random_state.get_state()[1])
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_lobpcg_mode():
    """Check that the lobpcg eigen solver yields a usable clustering.

    lobpcg does not work on tiny matrices, so a 100-sample blob dataset is
    used to keep it on its intended code path.
    """
    centers = np.array([[0., 0.],
                        [10., 10.]])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                               cluster_std=1., random_state=42)
    distances = pairwise_distances(X)
    # Convert distances into similarities.
    similarity = np.max(distances) - distances
    labels = spectral_clustering(similarity, n_clusters=len(centers),
                                 random_state=0, eigen_solver="lobpcg")
    # Only a loose accuracy floor is required -- the point is that the
    # solver ran at all, not that it is accurate.
    assert_greater(np.mean(labels == true_labels), .3)
def test_spectral_amg_mode():
    """Exercise the amg eigen solver when pyamg is installed.

    When pyamg is missing, requesting the amg solver must raise ValueError.
    """
    centers = np.array([[0., 0., 0.],
                        [10., 10., 10.],
                        [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                               cluster_std=1., random_state=42)
    distances = pairwise_distances(X)
    # Similarity matrix, as a sparse input.
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(similarity, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # Only a loose accuracy floor -- we just need the solver to work.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        assert_raises(ValueError, spectral_embedding, similarity,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    """An unrecognized eigen_solver string must raise a ValueError."""
    centers = np.array([[0., 0., 0.],
                        [10., 10., 10.],
                        [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                               cluster_std=1., random_state=42)
    distances = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    """An unrecognized assign_labels string must raise a ValueError."""
    centers = np.array([[0., 0., 0.],
                        [10., 10., 10.],
                        [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                               cluster_std=1., random_state=42)
    distances = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(distances) - distances)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    """Spectral clustering should accept a sparse affinity matrix.

    The matrix must be reasonably large, or the lobpcg solver falls back to
    its non-sparse (and buggy) mode.
    """
    affinity = np.array([[1, 5, 2, 2, 1, 0, 0, 0, 0, 0],
                         [5, 1, 3, 2, 1, 0, 0, 0, 0, 0],
                         [2, 3, 1, 1, 1, 0, 0, 0, 0, 0],
                         [2, 2, 1, 1, 1, 0, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1, 2, 1, 1, 1],
                         [0, 0, 0, 0, 1, 2, 2, 3, 3, 2],
                         [0, 0, 0, 0, 2, 2, 3, 3, 3, 4],
                         [0, 0, 0, 0, 1, 3, 3, 1, 2, 4],
                         [0, 0, 0, 0, 1, 3, 3, 2, 1, 4],
                         [0, 0, 0, 0, 1, 2, 4, 4, 4, 1]])
    affinity = sparse.coo_matrix(affinity)
    model = SpectralClustering(random_state=0, n_clusters=2,
                               affinity='precomputed').fit(affinity)
    labels = model.labels_
    # Normalize the arbitrary cluster ids so sample 0 is labeled 1.
    if labels[0] == 0:
        labels = 1 - labels
    assert_greater(np.mean(labels == [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]), .89)
def test_affinities():
    """Different affinity settings should recover two well-separated blobs
    perfectly; an unknown affinity string must be rejected."""
    X, y = make_blobs(n_samples=40, random_state=1, centers=[[1, 1], [-1, -1]],
                      cluster_std=0.4)
    # k-nearest-neighbors graph affinity
    model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                               random_state=0)
    assert_equal(adjusted_rand_score(y, model.fit(X).labels_), 1)
    # default (rbf) affinity with an explicit gamma
    model = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    assert_equal(adjusted_rand_score(y, model.fit(X).labels_), 1)
    # an unrecognized affinity is rejected at fit time
    model = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, model.fit, X)
def test_discretize(seed=8):
    # Test that discretize recovers noisy class assignments: build a clean
    # one-hot indicator matrix for random labels, perturb it with Gaussian
    # noise, and require discretize() to still agree with the true labels.
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # random class labels
            # NOTE(review): random_integers is deprecated (use randint) and
            # np.float is a removed alias in modern NumPy -- update when
            # modernizing this (Python 2 era) test module.
            y_true = random_state.random_integers(0, n_class, n_samples)
            y_true = np.array(y_true, np.float)
            # noise class assignment matrix
            # One-hot encoding via COO: entry (i, y_true[i]) = 1, giving an
            # (n_samples, n_class + 1) indicator matrix.
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                              (np.arange(n_samples),
                                               y_true)),
                                             shape=(n_samples,
                                                    n_class + 1))
            # Perturb the clean indicator with small Gaussian noise.
            y_true_noisy = (y_indicator.todense()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            # Require substantial (not perfect) agreement despite the noise.
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| [
"sklearn.utils.testing.assert_raises",
"sklearn.cluster.SpectralClustering",
"sklearn.metrics.pairwise_distances",
"sklearn.utils.testing.assert_equal",
"numpy.ones",
"numpy.random.RandomState",
"cPickle.dumps",
"scipy.sparse.coo_matrix",
"numpy.max",
"numpy.array",
"numpy.mean",
"sklearn.clus... | [((714, 894), 'numpy.array', 'np.array', (['[[1, 5, 2, 1, 0, 0, 0], [5, 1, 3, 1, 0, 0, 0], [2, 3, 1, 1, 0, 0, 0], [1, 1,\n 1, 1, 2, 1, 1], [0, 0, 0, 2, 2, 3, 2], [0, 0, 0, 1, 3, 1, 4], [0, 0, 0,\n 1, 2, 4, 1]]'], {}), '([[1, 5, 2, 1, 0, 0, 0], [5, 1, 3, 1, 0, 0, 0], [2, 3, 1, 1, 0, 0, \n 0], [1, 1, 1, 1, 2, 1, 1], [0, 0, 0, 2, 2, 3, 2], [0, 0, 0, 1, 3, 1, 4],\n [0, 0, 0, 1, 2, 4, 1]])\n', (722, 894), True, 'import numpy as np\n'), ((2284, 2320), 'numpy.array', 'np.array', (['[[0.0, 0.0], [10.0, 10.0]]'], {}), '([[0.0, 0.0], [10.0, 10.0]])\n', (2292, 2320), True, 'import numpy as np\n'), ((2361, 2437), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'n_samples': '(100)', 'centers': 'centers', 'cluster_std': '(1.0)', 'random_state': '(42)'}), '(n_samples=100, centers=centers, cluster_std=1.0, random_state=42)\n', (2371, 2437), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((2477, 2498), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X'], {}), '(X)\n', (2495, 2498), False, 'from sklearn.metrics import pairwise_distances, adjusted_rand_score\n'), ((2981, 3048), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [10.0, 10.0, 10.0], [20.0, 20.0, 20.0]]'], {}), '([[0.0, 0.0, 0.0], [10.0, 10.0, 10.0], [20.0, 20.0, 20.0]])\n', (2989, 3048), True, 'import numpy as np\n'), ((3092, 3168), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'n_samples': '(100)', 'centers': 'centers', 'cluster_std': '(1.0)', 'random_state': '(42)'}), '(n_samples=100, centers=centers, cluster_std=1.0, random_state=42)\n', (3102, 3168), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((3208, 3229), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X'], {}), '(X)\n', (3226, 3229), False, 'from sklearn.metrics import pairwise_distances, adjusted_rand_score\n'), ((3300, 3320), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['S'], {}), '(S)\n', (3317, 3320), False, 'from 
scipy import sparse\n'), ((4116, 4183), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [10.0, 10.0, 10.0], [20.0, 20.0, 20.0]]'], {}), '([[0.0, 0.0, 0.0], [10.0, 10.0, 10.0], [20.0, 20.0, 20.0]])\n', (4124, 4183), True, 'import numpy as np\n'), ((4227, 4303), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'n_samples': '(100)', 'centers': 'centers', 'cluster_std': '(1.0)', 'random_state': '(42)'}), '(n_samples=100, centers=centers, cluster_std=1.0, random_state=42)\n', (4237, 4303), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((4343, 4364), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X'], {}), '(X)\n', (4361, 4364), False, 'from sklearn.metrics import pairwise_distances, adjusted_rand_score\n'), ((4435, 4455), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['S'], {}), '(S)\n', (4452, 4455), False, 'from scipy import sparse\n'), ((4460, 4569), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'spectral_clustering', 'S'], {'n_clusters': '(2)', 'random_state': '(0)', 'eigen_solver': '"""<unknown>"""'}), "(ValueError, spectral_clustering, S, n_clusters=2,\n random_state=0, eigen_solver='<unknown>')\n", (4473, 4569), False, 'from sklearn.utils.testing import assert_raises\n'), ((4710, 4777), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [10.0, 10.0, 10.0], [20.0, 20.0, 20.0]]'], {}), '([[0.0, 0.0, 0.0], [10.0, 10.0, 10.0], [20.0, 20.0, 20.0]])\n', (4718, 4777), True, 'import numpy as np\n'), ((4821, 4897), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'n_samples': '(100)', 'centers': 'centers', 'cluster_std': '(1.0)', 'random_state': '(42)'}), '(n_samples=100, centers=centers, cluster_std=1.0, random_state=42)\n', (4831, 4897), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((4937, 4958), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['X'], {}), '(X)\n', (4955, 4958), False, 'from sklearn.metrics import 
pairwise_distances, adjusted_rand_score\n'), ((5029, 5049), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['S'], {}), '(S)\n', (5046, 5049), False, 'from scipy import sparse\n'), ((5054, 5164), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'spectral_clustering', 'S'], {'n_clusters': '(2)', 'random_state': '(0)', 'assign_labels': '"""<unknown>"""'}), "(ValueError, spectral_clustering, S, n_clusters=2,\n random_state=0, assign_labels='<unknown>')\n", (5067, 5164), False, 'from sklearn.utils.testing import assert_raises\n'), ((5333, 5680), 'numpy.array', 'np.array', (['[[1, 5, 2, 2, 1, 0, 0, 0, 0, 0], [5, 1, 3, 2, 1, 0, 0, 0, 0, 0], [2, 3, 1, \n 1, 1, 0, 0, 0, 0, 0], [2, 2, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1,\n 2, 1, 1, 1], [0, 0, 0, 0, 1, 2, 2, 3, 3, 2], [0, 0, 0, 0, 2, 2, 3, 3, 3,\n 4], [0, 0, 0, 0, 1, 3, 3, 1, 2, 4], [0, 0, 0, 0, 1, 3, 3, 2, 1, 4], [0,\n 0, 0, 0, 1, 2, 4, 4, 4, 1]]'], {}), '([[1, 5, 2, 2, 1, 0, 0, 0, 0, 0], [5, 1, 3, 2, 1, 0, 0, 0, 0, 0], [\n 2, 3, 1, 1, 1, 0, 0, 0, 0, 0], [2, 2, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1,\n 1, 1, 1, 2, 1, 1, 1], [0, 0, 0, 0, 1, 2, 2, 3, 3, 2], [0, 0, 0, 0, 2, 2,\n 3, 3, 3, 4], [0, 0, 0, 0, 1, 3, 3, 1, 2, 4], [0, 0, 0, 0, 1, 3, 3, 2, 1,\n 4], [0, 0, 0, 0, 1, 2, 4, 4, 4, 1]])\n', (5341, 5680), True, 'import numpy as np\n'), ((5855, 5875), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['S'], {}), '(S)\n', (5872, 5875), False, 'from scipy import sparse\n'), ((6173, 6262), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'n_samples': '(40)', 'random_state': '(1)', 'centers': '[[1, 1], [-1, -1]]', 'cluster_std': '(0.4)'}), '(n_samples=40, random_state=1, centers=[[1, 1], [-1, -1]],\n cluster_std=0.4)\n', (6183, 6262), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((6323, 6401), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': '(2)', 'affinity': '"""nearest_neighbors"""', 'random_state': '(0)'}), "(n_clusters=2, 
affinity='nearest_neighbors', random_state=0)\n", (6341, 6401), False, 'from sklearn.cluster import SpectralClustering, spectral_clustering\n'), ((6523, 6580), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': '(2)', 'gamma': '(2)', 'random_state': '(0)'}), '(n_clusters=2, gamma=2, random_state=0)\n', (6541, 6580), False, 'from sklearn.cluster import SpectralClustering, spectral_clustering\n'), ((6712, 6766), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': '(2)', 'affinity': '"""<unknown>"""'}), "(n_clusters=2, affinity='<unknown>')\n", (6730, 6766), False, 'from sklearn.cluster import SpectralClustering, spectral_clustering\n'), ((6771, 6807), 'sklearn.utils.testing.assert_raises', 'assert_raises', (['ValueError', 'sp.fit', 'X'], {}), '(ValueError, sp.fit, X)\n', (6784, 6807), False, 'from sklearn.utils.testing import assert_raises\n'), ((6916, 6943), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (6937, 6943), True, 'import numpy as np\n'), ((2526, 2535), 'numpy.max', 'np.max', (['D'], {}), '(D)\n', (2532, 2535), True, 'import numpy as np\n'), ((2853, 2883), 'numpy.mean', 'np.mean', (['(labels == true_labels)'], {}), '(labels == true_labels)\n', (2860, 2883), True, 'import numpy as np\n'), ((3257, 3266), 'numpy.max', 'np.max', (['D'], {}), '(D)\n', (3263, 3266), True, 'import numpy as np\n'), ((4392, 4401), 'numpy.max', 'np.max', (['D'], {}), '(D)\n', (4398, 4401), True, 'import numpy as np\n'), ((4986, 4995), 'numpy.max', 'np.max', (['D'], {}), '(D)\n', (4992, 4995), True, 'import numpy as np\n'), ((6081, 6130), 'numpy.mean', 'np.mean', (['(labels == [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])'], {}), '(labels == [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n', (6088, 6130), True, 'import numpy as np\n'), ((6478, 6508), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['y', 'labels'], {}), '(y, labels)\n', (6497, 6508), False, 'from sklearn.metrics import pairwise_distances, 
adjusted_rand_score\n'), ((6629, 6659), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['y', 'labels'], {}), '(y, labels)\n', (6648, 6659), False, 'from sklearn.metrics import pairwise_distances, adjusted_rand_score\n'), ((3789, 3819), 'numpy.mean', 'np.mean', (['(labels == true_labels)'], {}), '(labels == true_labels)\n', (3796, 3819), True, 'import numpy as np\n'), ((7151, 7177), 'numpy.array', 'np.array', (['y_true', 'np.float'], {}), '(y_true, np.float)\n', (7159, 7177), True, 'import numpy as np\n'), ((7742, 7780), 'sklearn.cluster.spectral.discretize', 'discretize', (['y_true_noisy', 'random_state'], {}), '(y_true_noisy, random_state)\n', (7752, 7780), False, 'from sklearn.cluster.spectral import discretize\n'), ((1143, 1163), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['S'], {}), '(S)\n', (1160, 1163), False, 'from scipy import sparse\n'), ((1632, 1681), 'sklearn.utils.testing.assert_array_equal', 'assert_array_equal', (['labels', '[1, 1, 1, 0, 0, 0, 0]'], {}), '(labels, [1, 1, 1, 0, 0, 0, 0])\n', (1650, 1681), False, 'from sklearn.utils.testing import assert_array_equal\n'), ((1748, 1801), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['model_copy.n_clusters', 'model.n_clusters'], {}), '(model_copy.n_clusters, model.n_clusters)\n', (1760, 1801), False, 'from sklearn.utils.testing import assert_equal\n'), ((1818, 1875), 'sklearn.utils.testing.assert_equal', 'assert_equal', (['model_copy.eigen_solver', 'model.eigen_solver'], {}), '(model_copy.eigen_solver, model.eigen_solver)\n', (1830, 1875), False, 'from sklearn.utils.testing import assert_equal\n'), ((2037, 2090), 'sklearn.utils.testing.assert_array_equal', 'assert_array_equal', (['model_copy.labels_', 'model.labels_'], {}), '(model_copy.labels_, model.labels_)\n', (2055, 2090), False, 'from sklearn.utils.testing import assert_array_equal\n'), ((5890, 5962), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'random_state': '(0)', 'n_clusters': '(2)', 
'affinity': '"""precomputed"""'}), "(random_state=0, n_clusters=2, affinity='precomputed')\n", (5908, 5962), False, 'from sklearn.cluster import SpectralClustering, spectral_clustering\n'), ((7808, 7843), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (7827, 7843), False, 'from sklearn.metrics import pairwise_distances, adjusted_rand_score\n'), ((1718, 1730), 'cPickle.dumps', 'dumps', (['model'], {}), '(model)\n', (1723, 1730), False, 'from cPickle import dumps, loads\n'), ((7267, 7285), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (7274, 7285), True, 'import numpy as np\n'), ((1190, 1322), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'random_state': '(0)', 'n_clusters': '(2)', 'affinity': '"""precomputed"""', 'eigen_solver': 'eigen_solver', 'assign_labels': 'assign_labels'}), "(random_state=0, n_clusters=2, affinity='precomputed',\n eigen_solver=eigen_solver, assign_labels=assign_labels)\n", (1208, 1322), False, 'from sklearn.cluster import SpectralClustering, spectral_clustering\n'), ((7332, 7352), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (7341, 7352), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""templates.py -- A set of predefined "base" prospector model specifications
that can be used as a starting point and then combined or altered.
"""
from copy import deepcopy
import numpy as np
from . import priors
from . import transforms
__all__ = ["TemplateLibrary",
"describe",
"adjust_dirichlet_agebins",
"adjust_continuity_agebins",
]
class Directory(object):
    """A dict-like container whose lookups return deep copies of the stored
    values, so callers may freely mutate what they get back without
    corrupting the stored template.

    Each entry is stored together with a human-readable description string.
    """

    def __init__(self):
        self._entries = {}
        self._descriptions = {}
        # Python 2 dicts expose `iteritems`; fall back to `items` on Python 3.
        self.iteritems = getattr(self._entries, "iteritems",
                                 self._entries.items)

    def __getitem__(self, k):
        # Hand out a copy, never the stored object itself.
        return deepcopy(self._entries[k])

    def __setitem__(self, k, v):
        # Each value is a (entry, description) pair.
        value, info = v
        self._entries[k] = value
        self._descriptions[k] = info

    def describe(self, k):
        # Delegate formatting to the module-level `describe` helper.
        print(describe(self._entries[k]))

    def show_contents(self):
        # List every stored key with its description.
        for key, info in list(self._descriptions.items()):
            print("'{}':\n {}".format(key, info))
def describe(parset):
    """Build a human-readable summary of a parameter specification.

    Free parameters are listed as ``name: prior``; fixed parameters as
    ``name: value [, depends_on]``.

    :param parset:
        A parameter specification dictionary (name -> config dict).

    :returns:
        The summary as a single string.
    """
    free = ["{}: {}".format(name, conf["prior"])
            for name, conf in list(parset.items()) if conf["isfree"]]
    fixed = ["{}: {} {}".format(name, conf["init"], conf.get("depends_on", ""))
             for name, conf in list(parset.items()) if not conf["isfree"]]
    ttext = ("Free Parameters: (name: prior) \n-----------\n"
             + "  " + "\n  ".join(free))
    ftext = ("Fixed Parameters: (name: value [, depends_on]) \n-----------\n"
             + "  " + "\n  ".join(fixed))
    return ttext + "\n\n" + ftext
def adjust_dirichlet_agebins(parset, agelims=(0., 8., 9., 10.)):
    """Given a list of limits in age for bins, adjust the parameter
    specifications to work for those limits.

    Fix: the default for ``agelims`` was a mutable list (shared across
    calls); it is now an equivalent immutable tuple.  The docstring also
    referred to a "zfraction" key, but the key actually used is
    "z_fraction".

    :param parset:
        The parameter specification dictionary to adjust. Must have entries
        (keys) for "mass", "agebins", "z_fraction".
    :param agelims:
        An iterable of bin edges, in log(yrs) of lookback time.

    :returns parset:
        The adjusted specification (modified in place).
    """
    agelims = list(agelims)
    agebins = np.array([agelims[:-1], agelims[1:]]).T
    ncomp = len(agelims) - 1
    # Initial `z` values corresponding to a constant SFR.
    zinit = np.array([(i - 1) / float(i) for i in range(ncomp, 1, -1)])
    # Set up the prior in `z` variables that corresponds to a dirichlet in sfr
    # fraction. THIS IS IMPORTANT
    alpha = np.arange(ncomp - 1, 0, -1)
    zprior = priors.Beta(alpha=alpha, beta=np.ones_like(alpha), mini=0.0, maxi=1.0)

    parset['mass']['N'] = ncomp
    parset['agebins']['N'] = ncomp
    parset['agebins']['init'] = agebins
    parset['z_fraction']['N'] = len(zinit)
    parset['z_fraction']['init'] = zinit
    parset['z_fraction']['prior'] = zprior
    return parset
def adjust_continuity_agebins(parset, tuniv=13.7, nbins=7):
    """Configure the continuity-SFH parameter set to use ``nbins`` age bins.

    The first two age bins are hard-coded to 0-30 Myr and 30-100 Myr, the
    final bin is hard-coded to cover 0.85 * tuniv to tuniv, and the rest
    split the intervening lookback time evenly in log(yr).

    :param parset:
        Parameter specification dictionary with "mass", "agebins", and
        "logsfr_ratios" entries.
    :param tuniv:
        Age of the Universe in Gyr.
    :param nbins:
        Number of SFH bins; must be at least 4.

    :returns parset:
        The adjusted specification (modified in place).
    """
    if nbins < 4:
        raise ValueError('Must have nbins >= 4, returning')

    # Bin edges in log10(yr): two fixed young bins, log-spaced middle bins,
    # and one fixed old bin reaching the age of the Universe.
    tbinmax = (tuniv * 0.85) * 1e9
    lim1, lim2 = 7.4772, 8.0  # log10 of the two youngest bin edges (yr)
    middle = np.linspace(lim2, np.log10(tbinmax), nbins - 2).tolist()
    agelims = [0, lim1] + middle + [np.log10(tuniv * 1e9)]
    agebins = np.array([agelims[:-1], agelims[1:]])

    ncomp = nbins
    # Student-t smoothness prior on log SFR ratios of adjacent bins.
    center = np.zeros(ncomp - 1)
    rprior = priors.StudentT(mean=center,
                             scale=np.ones_like(center) * 0.3,
                             df=np.ones_like(center) * 2)

    parset['mass']['N'] = ncomp
    parset['agebins']['N'] = ncomp
    parset['agebins']['init'] = agebins.T
    parset["logsfr_ratios"]["N"] = ncomp - 1
    parset["logsfr_ratios"]["init"] = center
    parset["logsfr_ratios"]["prior"] = rprior
    return parset
# The module-level registry of pre-packaged model specifications; lookups
# return deep copies, so templates can be modified safely by users.
TemplateLibrary = Directory()
# A template for what parameter configuration element should look like
par_name = {"N": 1,
            "isfree": True,
            "init": 0.5,
            "prior": priors.TopHat(mini=0, maxi=1.0),
            "depends_on": None,
            "units": "",}
# ---------------------
# --- Explicit defaults
# --------------------
imf = {"N": 1, "isfree": False, "init": 2}  # Kroupa
dust_type = {"N": 1, "isfree": False, "init": 0}  # Power-law
_defaults_ = {"imf_type": imf,  # FSPS parameter
              "dust_type": dust_type  # FSPS parameter
              }
# NOTE(review): "amd" in the description below is a typo for "and" (left
# unchanged here since the string is runtime data).
TemplateLibrary["type_defaults"] = (_defaults_,
                                    "Explicitly sets dust amd IMF types.")
# --------------------------
# --- Some (very) common parameters ----
# --------------------------
# Redshift: fixed by default, with a broad TopHat prior if freed.
zred = {"N": 1, "isfree": False,
        "init": 0.1,
        "units": "redshift",
        "prior": priors.TopHat(mini=0.0, maxi=4.0)}
# Total stellar mass formed, log-uniform prior.
mass = {"N": 1, "isfree": True,
        "init": 1e10,
        "units": "Solar masses formed",
        "prior": priors.LogUniform(mini=1e8, maxi=1e12)}
logzsol = {"N": 1, "isfree": True,
           "init": -0.5,
           "units": r"$\log (Z/Z_\odot)$",
           "prior": priors.TopHat(mini=-2, maxi=0.19)}
dust2 = {"N": 1, "isfree": True,
         "init": 0.6,
         "units": "optical depth at 5500AA",
         "prior": priors.TopHat(mini=0.0, maxi=2.0)}
sfh = {"N": 1, "isfree": False, "init": 0, "units": "FSPS index"}
tage = {"N": 1, "isfree": True,
        "init": 1, "units": "Gyr",
        "prior": priors.TopHat(mini=0.001, maxi=13.8)}
_basic_ = {"zred":zred,
           "mass": mass,
           "logzsol": logzsol,  # FSPS parameter
           "dust2": dust2,  # FSPS parameter
           "sfh": sfh,  # FSPS parameter
           "tage": tage  # FSPS parameter
           }
_basic_.update(_defaults_)
TemplateLibrary["ssp"] = (_basic_,
                          ("Basic set of (free) parameters for a delta function SFH"))
# ----------------------------
# --- Parametric SFH -----
# ----------------------------
# This lookup returns a deep copy of the "ssp" template, so the edits
# below do not alter the registered "ssp" entry.
_parametric_ = TemplateLibrary["ssp"]
_parametric_["sfh"]["init"] = 4  # Delay-tau
_parametric_["tau"] = {"N": 1, "isfree": True,
                       "init": 1, "units": "Gyr^{-1}",
                       "prior": priors.LogUniform(mini=0.1, maxi=30)}
TemplateLibrary["parametric_sfh"] = (_parametric_,
                                     ("Basic set of (free) parameters for a delay-tau SFH."))
# --------------------------
# --- Dust emission ----
# --------------------------
# All fixed by default; priors are provided in case a user frees them.
add_duste = {"N": 1, "isfree": False, "init": True}
duste_umin = {"N": 1, "isfree": False,
              "init": 1.0, "units": 'MMP83 local MW intensity',
              "prior": priors.TopHat(mini=0.1, maxi=25)}
duste_qpah = {"N": 1, "isfree": False,
              'init': 4.0, "units": 'Percent mass fraction of PAHs in dust.',
              "prior": priors.TopHat(mini=0.5, maxi=7.0)}
duste_gamma = {"N": 1, "isfree": False,
               "init": 1e-3, "units": 'Mass fraction of dust in high radiation intensity.',
               "prior": priors.LogUniform(mini=1e-3, maxi=0.15)}
_dust_emission_ = {"add_dust_emission": add_duste,
                   "duste_umin": duste_umin,  # FSPS / Draine & Li parameter
                   "duste_qpah": duste_qpah,  # FSPS / Draine & Li parameter
                   "duste_gamma": duste_gamma  # FSPS / Draine & Li parameter
                   }
TemplateLibrary["dust_emission"] = (_dust_emission_,
                                    ("The set of (fixed) dust emission parameters."))
# --------------------------
# --- Nebular Emission ----
# --------------------------
add_neb = {'N': 1, 'isfree': False, 'init': True}
neb_cont = {'N': 1, 'isfree': False, 'init': True}
neb_spec = {'N': 1, 'isfree': False, 'init': True}
# Note this depends on stellar metallicity
gas_logz = {'N': 1, 'isfree': False,
            'init': 0.0, 'units': r'log Z/Z_\odot',
            'depends_on': transforms.stellar_logzsol,
            'prior': priors.TopHat(mini=-2.0, maxi=0.5)}
gas_logu = {"N": 1, 'isfree': False,
            'init': -2.0, 'units': r"Q_H/N_H",
            'prior': priors.TopHat(mini=-4, maxi=-1)}
_nebular_ = {"add_neb_emission": add_neb,  # FSPS parameter.
             "add_neb_continuum": neb_cont,  # FSPS parameter.
             "nebemlineinspec": neb_spec,  # FSPS parameter.
             "gas_logz": gas_logz,  # FSPS parameter.
             "gas_logu": gas_logu,  # FSPS parameter.
             }
TemplateLibrary["nebular"] = (_nebular_,
                              ("The set of nebular emission parameters, "
                               "with gas_logz tied to stellar logzsol."))
# --------------------------
# --- AGN Torus Emission ---
# --------------------------
add_agn = {"N": 1, "isfree": False, "init": True}
fagn = {'N': 1, 'isfree': False,
        'init': 1e-4, 'units': r'L_{AGN}/L_*',
        'prior': priors.LogUniform(mini=1e-5, maxi=3.0)}
agn_tau = {"N": 1, 'isfree': False,
           "init": 5.0, 'units': r"optical depth",
           'prior': priors.LogUniform(mini=5.0, maxi=150.)}
_agn_ = {"fagn": fagn,  # FSPS parameter.
         "agn_tau": agn_tau,  # FSPS parameter.
         "add_agn_dust": add_agn
         }
TemplateLibrary["agn"] = (_agn_,
                          ("The set of (fixed) AGN dusty torus emission parameters."))
# --------------------------
# --- IGM Absorption ---
# --------------------------
add_igm = {'N': 1, 'isfree': False, 'init': True}
igm_fact ={'N': 1, 'isfree': False, 'init': 1.0,
           'units': 'factor by which to scale the Madau attenuation',
           'prior': priors.ClippedNormal(mean=1.0, sigma=0.1, mini=0.0, maxi=2.0)}
_igm_ = {"add_igm_absorption": add_igm,  # FSPS Parameter.
         "igm_factor": igm_fact,  # FSPS Parameter.
         }
TemplateLibrary["igm"] = (_igm_,
                          ("The set of (fixed) IGM absorption parameters."))
# --------------------------
# --- Spectral Smoothing ---
# --------------------------
smooth = {'N': 1, 'isfree': False, 'init': 'vel'}
fft = {'N': 1, 'isfree': False, 'init': True}
# Wavelength limits (currently unused; see the commented-out entries below).
wlo = {'N': 1, 'isfree': False, 'init': 3500.0, 'units': r'$\AA$'}
whi = {'N': 1, 'isfree': False, 'init': 7800.0, 'units': r'$\AA$'}
sigma_smooth = {'N': 1, 'isfree': True,
                'init': 200.0, 'units': 'km/s',
                'prior': priors.TopHat(mini=10, maxi=300)}
_smoothing_ = {"smoothtype": smooth, "fftsmooth": fft,  # prospecter `smoothspec` parameter
               #"min_wave_smooth": wlo, "max_wave_smooth": whi,
               "sigma_smooth": sigma_smooth  # prospecter `smoothspec` parameter
               }
TemplateLibrary["spectral_smoothing"] = (_smoothing_,
                                         ("Set of parameters for spectal smoothing."))
# --------------------------
# --- Spectral calibration
# -------------------------
spec_norm = {'N': 1, 'isfree': False,
             'init': 1.0, 'units': 'f_true/f_obs',
             'prior': priors.Normal(mean=1.0, sigma=0.1)}
# What order polynomial?
npoly = 12
porder = {'N': 1, 'isfree': False, 'init': npoly}
preg = {'N': 1, 'isfree': False, 'init': 0.}
# Allowed amplitude of each coefficient shrinks with polynomial order.
polymax = 0.1 / (np.arange(npoly) + 1)
pcoeffs = {'N': npoly, 'isfree': True,
           'init': np.zeros(npoly),
           'units': 'ln(f_tru/f_obs)_j=\sum_{i=1}^N poly_coeffs_{i-1} * lambda_j^i',
           'prior': priors.TopHat(mini=-polymax, maxi=polymax)}
_polyopt_ = {"polyorder": porder,  # order of polynomial to optimize
             "poly_regularization": preg,  # Regularization of polynomial coeffs (can be a vector).
             "spec_norm": spec_norm  # Overall normalization of the spectrum.
             }
_polyfit_ = {"spec_norm": spec_norm,  # Overall normalization of the spectrum.
             "poly_coeffs": pcoeffs  # Polynomial coefficients
             }
TemplateLibrary["optimize_speccal"] = (_polyopt_,
                                       ("Set of parameters (most of which are fixed) "
                                        "for optimizing a polynomial calibration vector."))
TemplateLibrary["fit_speccal"] = (_polyfit_,
                                  ("Set of parameters (most of which are free) for sampling "
                                   "the coefficients of a polynomial calibration vector."))
# ----------------------------
# --- Additional SF Bursts ---
# ---------------------------
# Burst time is parameterized as a fraction of `tage` (fage_burst); the
# actual `tburst` value is derived via transforms.tburst_from_fage.
fage_burst = {'N': 1, 'isfree': False,
              'init': 0.0, 'units': 'time at wich burst happens, as a fraction of `tage`',
              'prior': priors.TopHat(mini=0.5, maxi=1.0)}
tburst = {'N': 1, 'isfree': False,
          'init': 0.0, 'units': 'Gyr',
          'prior': None, 'depends_on': transforms.tburst_from_fage}
fburst = {'N': 1, 'isfree': False,
          'init': 0.0, 'units': 'fraction of total mass formed in the burst',
          'prior': priors.TopHat(mini=0.0, maxi=0.5)}
_burst_ = {"tburst": tburst,
           "fburst": fburst,
           "fage_burst": fage_burst}
TemplateLibrary["burst_sfh"] = (_burst_,
                                ("The set of (fixed) parameters for an SF burst "
                                 "added to a parameteric SFH, with the burst time "
                                 "controlled by `fage_burst`."))
# -----------------------------------
# --- Nonparametric-logmass SFH ----
# -----------------------------------
# Using a (perhaps dangerously) simple nonparametric model of mass in fixed time bins with a logarithmic prior.
# Start from a (deep) copy of the "ssp" template and drop `tage`, which is
# replaced by the explicit age bins below.
_nonpar_lm_ = TemplateLibrary["ssp"]
_ = _nonpar_lm_.pop("tage")
_nonpar_lm_["sfh"] = {"N": 1, "isfree": False, "init": 3, "units": "FSPS index"}
# This will be the mass in each bin.  It depends on other free and fixed
# parameters.  Its length needs to be modified based on the number of bins
_nonpar_lm_["mass"] = {'N': 3, 'isfree': True, 'init': 1e6, 'units': r'M$_\odot$',
                       'prior': priors.LogUniform(mini=1e5, maxi=1e12)}
# This gives the start and stop of each age bin.  It can be adjusted and its
# length must match the lenth of "mass"
_nonpar_lm_["agebins"] = {'N': 3, 'isfree': False,
                          'init': [[0.0, 8.0], [8.0, 9.0], [9.0, 10.0]],
                          'units': 'log(yr)'}
# This is the *total* stellar mass formed
_nonpar_lm_["total_mass"] = {"N": 1, "isfree": False, "init": 1e10, "units": "Solar masses formed",
                             "depends_on": transforms.total_mass}
TemplateLibrary["logm_sfh"] = (_nonpar_lm_,
                               "Non-parameteric SFH fitting for log-mass in fixed time bins")
# ----------------------------
# --- Continuity SFH ----
# ----------------------------
# A non-parametric SFH model of mass in fixed time bins with a smoothness prior
_nonpar_continuity_ = TemplateLibrary["ssp"]
_ = _nonpar_continuity_.pop("tage")
_nonpar_continuity_["sfh"] = {"N": 1, "isfree": False, "init": 3, "units": "FSPS index"}
# This is the *total* mass formed, as a variable
_nonpar_continuity_["logmass"] = {"N": 1, "isfree": True, "init": 10, 'units': 'Msun',
                                  'prior': priors.TopHat(mini=7, maxi=12)}
# This will be the mass in each bin.  It depends on other free and fixed
# parameters.  Its length needs to be modified based on the number of bins
_nonpar_continuity_["mass"] = {'N': 3, 'isfree': False, 'init': 1e6, 'units': r'M$_\odot$',
                               'depends_on': transforms.logsfr_ratios_to_masses}
# This gives the start and stop of each age bin.  It can be adjusted and its
# length must match the lenth of "mass"
_nonpar_continuity_["agebins"] = {'N': 3, 'isfree': False,
                                  'init': [[0.0, 8.0], [8.0, 9.0], [9.0, 10.0]],
                                  'units': 'log(yr)'}
# This controls the distribution of SFR(t) / SFR(t+dt).  It has NBINS-1 components.
_nonpar_continuity_["logsfr_ratios"] = {'N': 2, 'isfree': True, 'init': [0.0,0.0],
                                        'prior':priors.StudentT(mean=np.full(2,0.0),
                                                                scale=np.full(2,0.3),
                                                                df=np.full(2,2))}
TemplateLibrary["continuity_sfh"] = (_nonpar_continuity_,
                                     "Non-parameteric SFH fitting for mass in fixed time bins with a smoothness prior")
# ----------------------------
# --- Flexible Continuity SFH ----
# ----------------------------
# A non-parametric SFH model of mass in flexible time bins with a smoothness prior
_nonpar_continuity_flex_ = TemplateLibrary["ssp"]
_ = _nonpar_continuity_flex_.pop("tage")
_nonpar_continuity_flex_["sfh"] = {"N": 1, "isfree": False, "init": 3, "units": "FSPS index"}
#_nonpar_continuity_flex_["tuniv"] = {"N": 1, "isfree": False, "init": 13.7, "units": "Gyr"}
# This is the *total* mass formed
_nonpar_continuity_flex_["logmass"] = {"N": 1, "isfree": True, "init": 10, 'units': 'Msun',
                                       'prior': priors.TopHat(mini=7, maxi=12)}
# These variables control the ratio of SFRs in adjacent bins
# there is one for a fixed "youngest" bin, one for the fixed "oldest" bin, and (N-1) for N flexible bins in between
_nonpar_continuity_flex_["logsfr_ratio_young"] = {'N': 1, 'isfree': True, 'init': 0.0, 'units': r'dlogSFR (dex)',
                                                  'prior': priors.StudentT(mean=0.0, scale=0.3, df=2)}
_nonpar_continuity_flex_["logsfr_ratio_old"] = {'N': 1, 'isfree': True, 'init': 0.0, 'units': r'dlogSFR (dex)',
                                                'prior': priors.StudentT(mean=0.0, scale=0.3, df=2)}
_nonpar_continuity_flex_["logsfr_ratios"] = {'N': 1, 'isfree': True, 'init': 0.0, 'units': r'dlogSFR (dex)',
                                             'prior': priors.StudentT(mean=0.0, scale=0.3, df=2)}
# This will be the mass in each bin.  It depends on other free and fixed
# parameters.  Its length needs to be modified based on the total number of
# bins (including fixed young and old bin)
_nonpar_continuity_flex_["mass"] = {'N': 4, 'isfree': False, 'init': 1e6, 'units': r'M$_\odot$',
                                    'depends_on': transforms.logsfr_ratios_to_masses_flex}
# This gives the start and stop of each age bin.  It can be adjusted and its
# length must match the lenth of "mass"
# Here the bin edges themselves are derived quantities (flexible bins).
_nonpar_continuity_flex_["agebins"] = {'N': 4, 'isfree': False,
                                       'depends_on': transforms.logsfr_ratios_to_agebins,
                                       'init': [[0.0, 7.5], [7.5, 8.5],[8.5,9.7], [9.7, 10.136]],
                                       'units': 'log(yr)'}
TemplateLibrary["continuity_flex_sfh"] = (_nonpar_continuity_flex_,
                                          ("Non-parameteric SFH fitting for mass in flexible time "
                                           "bins with a smoothness prior"))
# ----------------------------
# --- Dirichlet SFH ----
# ----------------------------
# Using the dirichlet prior on SFR fractions in bins of constant SF.
_dirichlet_ = TemplateLibrary["ssp"]
_ = _dirichlet_.pop("tage")
_dirichlet_["sfh"] = {"N": 1, "isfree": False, "init": 3, "units": "FSPS index"}
# This will be the mass in each bin. It depends on other free and fixed
# parameters. It's length needs to be modified based on the number of bins
_dirichlet_["mass"] = {'N': 3, 'isfree': False, 'init': 1., 'units': r'M$_\odot$',
'depends_on': transforms.zfrac_to_masses}
# This gives the start and stop of each age bin. It can be adjusted and its
# length must match the lenth of "mass"
_dirichlet_["agebins"] = {'N': 3, 'isfree': False,
'init': [[0.0, 8.0], [8.0, 9.0], [9.0, 10.0]],
'units': 'log(yr)'}
# Auxiliary variable used for sampling sfr_fractions from dirichlet. This
# *must* be adjusted depending on the number of bins
_dirichlet_["z_fraction"] = {"N": 2, 'isfree': True, 'init': [0, 0], 'units': None,
'prior': priors.Beta(alpha=1.0, beta=1.0, mini=0.0, maxi=1.0)}
# This is the *total* stellar mass formed
_dirichlet_["total_mass"] = mass
TemplateLibrary["dirichlet_sfh"] = (_dirichlet_,
"Non-parameteric SFH with Dirichlet prior (fractional SFR)")
# ----------------------------
# --- Prospector-alpha ---
# ----------------------------
_alpha_ = TemplateLibrary["dirichlet_sfh"]
_alpha_.update(TemplateLibrary["dust_emission"])
_alpha_.update(TemplateLibrary["nebular"])
_alpha_.update(TemplateLibrary["agn"])
# Set the dust and agn emission free
_alpha_["duste_qpah"]["isfree"] = True
_alpha_["duste_umin"]["isfree"] = True
_alpha_["duste_gamma"]["isfree"] = True
_alpha_["fagn"]["isfree"] = True
_alpha_["agn_tau"]["isfree"] = True
# Complexify the dust attenuation
_alpha_["dust_type"] = {"N": 1, "isfree": False, "init": 4, "units": "FSPS index"}
_alpha_["dust2"]["prior"] = priors.TopHat(mini=0.0, maxi=4.0)
_alpha_["dust1"] = {"N": 1, "isfree": False, 'depends_on': transforms.dustratio_to_dust1,
"init": 0.0, "units": "optical depth towards young stars"}
_alpha_["dust_ratio"] = {"N": 1, "isfree": True,
"init": 1.0, "units": "ratio of birth-cloud to diffuse dust",
"prior": priors.ClippedNormal(mini=0.0, maxi=2.0, mean=1.0, sigma=0.3)}
_alpha_["dust_index"] = {"N": 1, "isfree": True,
"init": 0.0, "units": "power-law multiplication of Calzetti",
"prior": priors.TopHat(mini=-2.0, maxi=0.5)}
# in Gyr
alpha_agelims = np.array([1e-9, 0.1, 0.3, 1.0, 3.0, 6.0, 13.6])
_alpha_ = adjust_dirichlet_agebins(_alpha_, agelims=(np.log10(alpha_agelims) + 9))
TemplateLibrary["alpha"] = (_alpha_,
"The prospector-alpha model, Leja et al. 2017")
| [
"numpy.full",
"copy.deepcopy",
"numpy.ones_like",
"numpy.zeros",
"numpy.array",
"numpy.arange",
"numpy.log10"
] | [((21866, 21914), 'numpy.array', 'np.array', (['[1e-09, 0.1, 0.3, 1.0, 3.0, 6.0, 13.6]'], {}), '([1e-09, 0.1, 0.3, 1.0, 3.0, 6.0, 13.6])\n', (21874, 21914), True, 'import numpy as np\n'), ((2509, 2536), 'numpy.arange', 'np.arange', (['(ncomp - 1)', '(0)', '(-1)'], {}), '(ncomp - 1, 0, -1)\n', (2518, 2536), True, 'import numpy as np\n'), ((3501, 3538), 'numpy.array', 'np.array', (['[agelims[:-1], agelims[1:]]'], {}), '([agelims[:-1], agelims[1:]])\n', (3509, 3538), True, 'import numpy as np\n'), ((3569, 3588), 'numpy.zeros', 'np.zeros', (['(ncomp - 1)'], {}), '(ncomp - 1)\n', (3577, 3588), True, 'import numpy as np\n'), ((11378, 11393), 'numpy.zeros', 'np.zeros', (['npoly'], {}), '(npoly)\n', (11386, 11393), True, 'import numpy as np\n'), ((908, 934), 'copy.deepcopy', 'deepcopy', (['self._entries[k]'], {}), '(self._entries[k])\n', (916, 934), False, 'from copy import deepcopy\n'), ((2226, 2263), 'numpy.array', 'np.array', (['[agelims[:-1], agelims[1:]]'], {}), '([agelims[:-1], agelims[1:]])\n', (2234, 2263), True, 'import numpy as np\n'), ((3599, 3617), 'numpy.ones_like', 'np.ones_like', (['mean'], {}), '(mean)\n', (3611, 3617), True, 'import numpy as np\n'), ((3633, 3651), 'numpy.ones_like', 'np.ones_like', (['mean'], {}), '(mean)\n', (3645, 3651), True, 'import numpy as np\n'), ((11298, 11314), 'numpy.arange', 'np.arange', (['npoly'], {}), '(npoly)\n', (11307, 11314), True, 'import numpy as np\n'), ((2578, 2597), 'numpy.ones_like', 'np.ones_like', (['alpha'], {}), '(alpha)\n', (2590, 2597), True, 'import numpy as np\n'), ((3466, 3496), 'numpy.log10', 'np.log10', (['(tuniv * 1000000000.0)'], {}), '(tuniv * 1000000000.0)\n', (3474, 3496), True, 'import numpy as np\n'), ((16178, 16193), 'numpy.full', 'np.full', (['(2)', '(0.0)'], {}), '(2, 0.0)\n', (16185, 16193), True, 'import numpy as np\n'), ((16264, 16279), 'numpy.full', 'np.full', (['(2)', '(0.3)'], {}), '(2, 0.3)\n', (16271, 16279), True, 'import numpy as np\n'), ((16347, 16360), 'numpy.full', 'np.full', 
(['(2)', '(2)'], {}), '(2, 2)\n', (16354, 16360), True, 'import numpy as np\n'), ((21967, 21990), 'numpy.log10', 'np.log10', (['alpha_agelims'], {}), '(alpha_agelims)\n', (21975, 21990), True, 'import numpy as np\n'), ((3427, 3444), 'numpy.log10', 'np.log10', (['tbinmax'], {}), '(tbinmax)\n', (3435, 3444), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for creating plots."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from batch_science.measurement_utils import compute_steps_to_result
from batch_science.measurement_utils import get_index_values
def create_subplots(nrows, ncols, plot_width=9, subplot_aspect_ratio=8 / 7):
  """Creates a grid of subplots with a fixed overall width.

  Args:
    nrows: Number of subplot rows.
    ncols: Number of subplot columns.
    plot_width: Total width of the figure.
    subplot_aspect_ratio: Width / height ratio of each individual subplot.

  Returns:
    The (figure, axes) pair returned by pyplot.subplots.
  """
  figure_size = (plot_width,
                 nrows * plot_width / ncols / subplot_aspect_ratio)
  return plt.subplots(nrows, ncols, figsize=figure_size)
def plot_steps_to_result(ax,
                         results,
                         add_scaling=True,
                         scaling_label=None,
                         normalizing_batch_size=None):
  """Plots steps to result vs batch size.

  Args:
    ax: Instance of pyplot.axes.Axes on which to plot.
    results: DataFrame of measurements indexed by (batch_size, step) with one row
      per batch size. Or, a dictionary of such DataFrames.
    add_scaling: Whether to draw a line indicating "perfect scaling".
    scaling_label: The label in the results dictionary used to draw the "perfect
      scaling" line (provided add_scaling is True). If not specified, a separate
      line is drawn for each label.
    normalizing_batch_size: If specified, the steps to result curves are
      normalized for each label in the results dictionary by the number of steps
      at this batch size.

  Raises:
    ValueError: If normalizing_batch_size does not match exactly one row.
  """
  if isinstance(results, pd.DataFrame):
    results = {"": results}

  for label, df in results.items():
    batch_sizes = get_index_values(df, "batch_size")
    steps = get_index_values(df, "step")

    # Possibly normalize the steps.
    if normalizing_batch_size:
      normalizing_index = np.where(batch_sizes == normalizing_batch_size)[0]
      if len(normalizing_index) != 1:
        raise ValueError(
            "Expected one row with batch_size={}, but found {}".format(
                normalizing_batch_size, len(normalizing_index)))
      # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
      # the builtin float is the documented, equivalent replacement.
      steps = steps.astype(float) / steps[normalizing_index]

    # Plot steps to result.
    ax.plot(batch_sizes, steps, "^-", label=label)

    # Possibly plot "perfect scaling".
    if add_scaling and (not scaling_label or label == scaling_label):
      if normalizing_batch_size:
        scale = steps[normalizing_index] * normalizing_batch_size
      else:
        scale = steps[0] * batch_sizes[0]
      # Perfect scaling: steps halve every time the batch size doubles.
      linear_scaling = scale / batch_sizes
      ax.plot(batch_sizes, linear_scaling, "k--", label="_nolegend_")

  # Format the axes.
  ax.set_xlabel("Batch Size")
  if normalizing_batch_size:
    ylabel = "Steps / (Steps at B={})".format(normalizing_batch_size)
  else:
    ylabel = "Steps"
  ax.set_ylabel(ylabel)
  # NOTE(review): basex/basey were renamed to "base" in Matplotlib 3.3 and
  # removed in 3.5; kept as-is for the Matplotlib version this repo targets --
  # confirm before upgrading.
  ax.set_xscale("log", basex=2)
  ax.set_yscale("log", basey=2)
  ax.grid(True)
def plot_optimal_metaparameter_values(ax, parameter_to_plot, steps_to_result,
                                      workload_metadata):
  """Plots the values of the optimal metaparameters vs batch size.

  Args:
    ax: Instance of pyplot.axes.Axes on which to plot.
    parameter_to_plot: One of ["Learning Rate", "Momentum", "Effective Learning
      Rate"].
    steps_to_result: DataFrame of measurements indexed by (batch_size, step)
      corresponding to the optimal measurements for each batch size.
    workload_metadata: A dict containing the metadata for each study.

  Raises:
    ValueError: If parameter_to_plot is not a recognized option.
  """
  # Look up the metaparameters of the optimal trial at each batch size.
  batch_sizes = get_index_values(steps_to_result, "batch_size")
  trial_ids = get_index_values(steps_to_result, "trial_id")
  optimal_parameters = []
  for batch_size, trial_id in zip(batch_sizes, trial_ids):
    optimal_parameters.append(
        workload_metadata[batch_size]["trials"][trial_id]["parameters"])

  # Compute y-values for the parameter to plot.
  ylabel = parameter_to_plot
  plot_heuristics = True
  if parameter_to_plot == "Learning Rate":
    yvalues = np.array([p["learning_rate"] for p in optimal_parameters])
  elif parameter_to_plot == "Momentum":
    yvalues = np.array([p["momentum"] for p in optimal_parameters])
    plot_heuristics = False  # The scaling heuristics apply to learning rates.
  elif parameter_to_plot == "Effective Learning Rate":
    learning_rates = np.array([p["learning_rate"] for p in optimal_parameters])
    momenta = np.array([p["momentum"] for p in optimal_parameters])
    yvalues = learning_rates / (1 - momenta)
    ylabel = "Learning Rate / (1 - Momentum)"
  else:
    raise ValueError(
        "Unrecognized parameter_to_plot: {}".format(parameter_to_plot))

  # Plot the optimal parameter values vs batch size.
  ax.plot(batch_sizes, yvalues, "^-", label="Optimal " + parameter_to_plot)

  # Overlay the "linear" and "square root" heuristics for scaling the
  # metaparameter value with increasing batch size.
  if plot_heuristics:
    base_value = yvalues[0]
    base_batch_size = batch_sizes[0]
    linear_heuristic = [
        base_value * batch_size / base_batch_size for batch_size in batch_sizes
    ]
    ax.plot(
        batch_sizes,
        linear_heuristic,
        linestyle="--",
        c="k",
        label="Linear Heuristic")
    sqrt_heuristic = [
        base_value * np.sqrt(batch_size / base_batch_size)
        for batch_size in batch_sizes
    ]
    ax.plot(
        batch_sizes,
        sqrt_heuristic,
        linestyle="-.",
        c="g",
        label="Square Root Heuristic")

  # Format the axes.
  ax.set_xlabel("Batch Size")
  ax.set_ylabel(ylabel)
  ax.set_xscale("log", basex=2)
  ax.set_yscale("log", basey=2)
  ax.grid(True)
def _unpack_params(params):
"""Extracts vectors of (learning_rate, one_minus_momentum) from parameters."""
if not params:
return [], []
xy = [(p["learning_rate"], 1 - p["momentum"]) for p in params]
return zip(*xy)
def plot_learning_rate_momentum_scatter(ax,
                                        objective_col_name,
                                        objective_goal,
                                        study_table,
                                        study_metadata,
                                        xlim,
                                        ylim,
                                        maximize=False):
  """Plots a categorized scatter plot of learning rate and (1 - momentum).

  Trials are categorized by those that reached the goal objective value, those
  that did not, and those that diverged during training.

  Args:
    ax: Instance of pyplot.axes.Axes on which to plot.
    objective_col_name: Column name of the objective metric.
    objective_goal: Threshold value of the objective metric indicating a
      successful trial.
    study_table: DataFrame of all measurements in the study indexed by (trial_id,
      step).
    study_metadata: A dict of study metadata.
    xlim: A pair (x_min, x_max) corresponding to the minimum and maximum learning
      rates to plot.
    ylim: A pair (y_min, y_max) corresponding to the minimum and maximum momentum
      values to plot.
    maximize: Whether the goal is to maximize (as opposed to minimize) the
      objective metric.

  Raises:
    ValueError: If a trial has a status other than COMPLETE or INFEASIBLE.
  """
  # Extract the parameters corresponding to each trial in 3 categories: those
  # that reached the goal objective value, those that did not, and those that
  # diverged during training.
  good_params = []
  bad_params = []
  infeasible_params = []
  comparator = operator.gt if maximize else operator.lt
  for trial_id, trial_metadata in study_metadata["trials"].items():
    params = trial_metadata["parameters"]
    if trial_metadata["status"] == "COMPLETE":
      measurements = study_table.loc[trial_id][objective_col_name]
      # A trial is "good" if any step's measurement beats the goal.
      if np.any(comparator(measurements, objective_goal)):
        good_params.append(params)
      else:
        bad_params.append(params)
    elif trial_metadata["status"] == "INFEASIBLE":
      infeasible_params.append(params)
    else:
      raise ValueError("Unexpected status: {}".format(trial_metadata["status"]))

  # Plot all good, bad, and infeasible parameter values.
  learning_rate, one_minus_momentum = _unpack_params(good_params)
  ax.scatter(
      learning_rate,
      one_minus_momentum,
      c="b",
      marker="o",
      alpha=1.0,
      s=40,
      label="Goal Achieved")
  learning_rate, one_minus_momentum = _unpack_params(bad_params)
  ax.scatter(
      learning_rate,
      one_minus_momentum,
      c="r",
      marker="^",
      alpha=0.7,
      s=40,
      label="Goal Not Achieved")
  learning_rate, one_minus_momentum = _unpack_params(infeasible_params)
  ax.scatter(
      learning_rate,
      one_minus_momentum,
      alpha=0.7,
      marker="x",
      c="k",
      s=25,
      label="Infeasible")

  # Format the axes.
  # BUG FIX: the x-axis shows the learning rate (see xlim above), but it was
  # labeled "Batch Size" -- an apparent copy-paste from the other plot helpers.
  ax.set_xlabel("Learning Rate")
  ax.set_xscale("log")
  ax.set_xlim(xlim)
  ax.set_ylabel("1 - Momentum")
  ax.set_yscale("log")
  ax.set_ylim(ylim)

  # Plot contour lines of constant log10(learning_rate / (1 - momentum)).
  grid_x = np.logspace(np.log10(xlim[0]), np.log10(xlim[1]), num=50)
  grid_y = np.logspace(np.log10(ylim[0]), np.log10(ylim[1]), num=50)
  grid_xx, grid_yy = np.meshgrid(grid_x, grid_y)
  grid_z = np.log10(grid_xx / grid_yy)
  ax.contour(grid_xx, grid_yy, grid_z, 10, colors="black", alpha=0.5)

  # Plot the best measurement as a yellow star.
  str_measurement = compute_steps_to_result(study_table, objective_col_name,
                                            objective_goal, maximize, None)
  if not str_measurement.empty:
    best_trial_id = get_index_values(str_measurement, "trial_id")[0]
    best_trial_params = study_metadata["trials"][best_trial_id]["parameters"]
    learning_rate, one_minus_momentum = _unpack_params([best_trial_params])
    ax.scatter(
        learning_rate,
        one_minus_momentum,
        marker="*",
        alpha=1.0,
        s=400,
        c="yellow")
def plot_best_measurements(ax, best_measurements, objective_col_name):
  """Plots the best objective value vs batch size.

  Args:
    ax: Instance of pyplot.axes.Axes on which to plot.
    best_measurements: DataFrame of measurements indexed by batch_size with one
      row per batch size. Or, a dictionary of such DataFrames.
    objective_col_name: Column name of the objective metric.
  """
  # Normalize the single-DataFrame case to a one-entry dictionary.
  if isinstance(best_measurements, pd.DataFrame):
    best_measurements = {"": best_measurements}

  for label, table in best_measurements.items():
    ax.plot(
        get_index_values(table, "batch_size"),
        table[objective_col_name],
        "^-",
        label=label)

  # Format the axes.
  ax.set_xlabel("Batch Size")
  ax.set_xscale("log", basex=2)
  ax.grid(True)
| [
"numpy.meshgrid",
"batch_science.measurement_utils.compute_steps_to_result",
"numpy.where",
"numpy.array",
"numpy.log10",
"numpy.sqrt",
"matplotlib.pyplot.subplots",
"batch_science.measurement_utils.get_index_values"
] | [((1208, 1269), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {'figsize': '(plot_width, plot_height)'}), '(nrows, ncols, figsize=(plot_width, plot_height))\n', (1220, 1269), True, 'import matplotlib.pyplot as plt\n'), ((4179, 4226), 'batch_science.measurement_utils.get_index_values', 'get_index_values', (['steps_to_result', '"""batch_size"""'], {}), "(steps_to_result, 'batch_size')\n", (4195, 4226), False, 'from batch_science.measurement_utils import get_index_values\n'), ((4241, 4286), 'batch_science.measurement_utils.get_index_values', 'get_index_values', (['steps_to_result', '"""trial_id"""'], {}), "(steps_to_result, 'trial_id')\n", (4257, 4286), False, 'from batch_science.measurement_utils import get_index_values\n'), ((9691, 9718), 'numpy.meshgrid', 'np.meshgrid', (['grid_x', 'grid_y'], {}), '(grid_x, grid_y)\n', (9702, 9718), True, 'import numpy as np\n'), ((9730, 9757), 'numpy.log10', 'np.log10', (['(grid_xx / grid_yy)'], {}), '(grid_xx / grid_yy)\n', (9738, 9757), True, 'import numpy as np\n'), ((9897, 9989), 'batch_science.measurement_utils.compute_steps_to_result', 'compute_steps_to_result', (['study_table', 'objective_col_name', 'objective_goal', 'maximize', 'None'], {}), '(study_table, objective_col_name, objective_goal,\n maximize, None)\n', (9920, 9989), False, 'from batch_science.measurement_utils import compute_steps_to_result\n'), ((2292, 2326), 'batch_science.measurement_utils.get_index_values', 'get_index_values', (['df', '"""batch_size"""'], {}), "(df, 'batch_size')\n", (2308, 2326), False, 'from batch_science.measurement_utils import get_index_values\n'), ((2339, 2367), 'batch_science.measurement_utils.get_index_values', 'get_index_values', (['df', '"""step"""'], {}), "(df, 'step')\n", (2355, 2367), False, 'from batch_science.measurement_utils import get_index_values\n'), ((4608, 4684), 'numpy.array', 'np.array', (["[parameters['learning_rate'] for parameters in optimal_parameters]"], {}), "([parameters['learning_rate'] 
for parameters in optimal_parameters])\n", (4616, 4684), True, 'import numpy as np\n'), ((9555, 9572), 'numpy.log10', 'np.log10', (['xlim[0]'], {}), '(xlim[0])\n', (9563, 9572), True, 'import numpy as np\n'), ((9574, 9591), 'numpy.log10', 'np.log10', (['xlim[1]'], {}), '(xlim[1])\n', (9582, 9591), True, 'import numpy as np\n'), ((9624, 9641), 'numpy.log10', 'np.log10', (['ylim[0]'], {}), '(ylim[0])\n', (9632, 9641), True, 'import numpy as np\n'), ((9643, 9660), 'numpy.log10', 'np.log10', (['ylim[1]'], {}), '(ylim[1])\n', (9651, 9660), True, 'import numpy as np\n'), ((10983, 11017), 'batch_science.measurement_utils.get_index_values', 'get_index_values', (['df', '"""batch_size"""'], {}), "(df, 'batch_size')\n", (10999, 11017), False, 'from batch_science.measurement_utils import get_index_values\n'), ((4748, 4819), 'numpy.array', 'np.array', (["[parameters['momentum'] for parameters in optimal_parameters]"], {}), "([parameters['momentum'] for parameters in optimal_parameters])\n", (4756, 4819), True, 'import numpy as np\n'), ((10082, 10127), 'batch_science.measurement_utils.get_index_values', 'get_index_values', (['str_measurement', '"""trial_id"""'], {}), "(str_measurement, 'trial_id')\n", (10098, 10127), False, 'from batch_science.measurement_utils import get_index_values\n'), ((2462, 2509), 'numpy.where', 'np.where', (['(batch_sizes == normalizing_batch_size)'], {}), '(batch_sizes == normalizing_batch_size)\n', (2470, 2509), True, 'import numpy as np\n'), ((4933, 5009), 'numpy.array', 'np.array', (["[parameters['learning_rate'] for parameters in optimal_parameters]"], {}), "([parameters['learning_rate'] for parameters in optimal_parameters])\n", (4941, 5009), True, 'import numpy as np\n'), ((5033, 5104), 'numpy.array', 'np.array', (["[parameters['momentum'] for parameters in optimal_parameters]"], {}), "([parameters['momentum'] for parameters in optimal_parameters])\n", (5041, 5104), True, 'import numpy as np\n'), ((5878, 5914), 'numpy.sqrt', 'np.sqrt', 
(['(batch_size / batch_sizes[0])'], {}), '(batch_size / batch_sizes[0])\n', (5885, 5914), True, 'import numpy as np\n')] |
import psyneulink as pnl
import numpy as np
# Model construction (presumably the Cohen & Huston Stroop architecture --
# confirm against the accompanying docs): three linear input layers feed
# recurrent hidden layers for colors and words, gated by a task layer, with a
# two-unit response layer on top.
# Input layers: pass the stimulus vectors through unchanged.
colors_input_layer = pnl.TransferMechanism(size=3,
                                           function=pnl.Linear,
                                           name='COLORS_INPUT')
words_input_layer = pnl.TransferMechanism(size=3,
                                          function=pnl.Linear,
                                          name='WORDS_INPUT')
task_input_layer = pnl.TransferMechanism(size=2,
                                         function=pnl.Linear,
                                         name='TASK_INPUT')
# Task layer, tasks: ('name the color', 'read the word')
task_layer = pnl.RecurrentTransferMechanism(size=2,
                                            function=pnl.Logistic(),
                                            hetero=-2,
                                            integrator_mode=True,
                                            integration_rate=0.01,
                                            name='TASK_LAYER')
# Hidden layer
# colors: ('red','green', 'neutral') words: ('RED','GREEN', 'NEUTRAL')
colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3,
                                                    function=pnl.Logistic(x_0=4.0),  # bias 4.0 is -4.0 in the paper; see Docs for description
                                                    integrator_mode=True,
                                                    hetero=-2,
                                                    integration_rate=0.01,  # cohen-huston text says 0.01
                                                    name='COLORS_HIDDEN')
words_hidden_layer = pnl.RecurrentTransferMechanism(size=3,
                                                   function=pnl.Logistic(x_0=4.0),
                                                   integrator_mode=True,
                                                   hetero=-2,
                                                   integration_rate=0.01,
                                                   name='WORDS_HIDDEN')
# Response layer, responses: ('red', 'green'); the second output port reports
# the ENERGY of the two response units (used as a decision signal).
response_layer = pnl.RecurrentTransferMechanism(size=2,
                                                function=pnl.Logistic(),
                                                hetero=-2.0,
                                                integrator_mode=True,
                                                integration_rate=0.01,
                                                output_ports = [pnl.RESULT,
                                                                {pnl.NAME: 'DECISION_ENERGY',
                                                                 pnl.VARIABLE: (pnl.OWNER_VALUE,0),
                                                                 pnl.FUNCTION: pnl.Stability(
                                                                     default_variable = np.array([0.0, 0.0]),
                                                                     metric = pnl.ENERGY,
                                                                     matrix = np.array([[0.0, -4.0],
                                                                                         [-4.0, 0.0]]))}],
                                                name='RESPONSE', )
# Mapping projections---------------------------------------------------------------------------------------------------
color_input_weights = pnl.MappingProjection(matrix=np.array([[1.0, 0.0, 0.0],
                                                           [0.0, 1.0, 0.0],
                                                           [0.0, 0.0, 1.0]]))
word_input_weights = pnl.MappingProjection(matrix=np.array([[1.0, 0.0, 0.0],
                                                          [0.0, 1.0, 0.0],
                                                          [0.0, 0.0, 1.0]]))
task_input_weights = pnl.MappingProjection(matrix=np.array([[1.0, 0.0],
                                                          [0.0, 1.0]]))
color_task_weights = pnl.MappingProjection(matrix=np.array([[4.0, 0.0],
                                                          [4.0, 0.0],
                                                          [4.0, 0.0]]))
task_color_weights = pnl.MappingProjection(matrix=np.array([[4.0, 4.0, 4.0],
                                                          [0.0, 0.0, 0.0]]))
response_color_weights = pnl.MappingProjection(matrix=np.array([[1.5, 0.0, 0.0],
                                                              [0.0, 1.5, 0.0]]))
response_word_weights = pnl.MappingProjection(matrix=np.array([[2.5, 0.0, 0.0],
                                                             [0.0, 2.5, 0.0]]))
color_response_weights = pnl.MappingProjection(matrix=np.array([[1.5, 0.0],
                                                              [0.0, 1.5],
                                                              [0.0, 0.0]]))
word_response_weights = pnl.MappingProjection(matrix=np.array([[2.5, 0.0],
                                                             [0.0, 2.5],
                                                             [0.0, 0.0]]))
word_task_weights = pnl.MappingProjection(matrix=np.array([[0.0, 4.0],
                                                         [0.0, 4.0],
                                                         [0.0, 4.0]]))
task_word_weights = pnl.MappingProjection(matrix=np.array([[0.0, 0.0, 0.0],
                                                          [4.0, 4.0, 4.0]]))
# CREATE Composition
comp = pnl.Composition()
# Add mechanisms
comp.add_node(colors_input_layer)
comp.add_node(colors_hidden_layer)
comp.add_node(words_input_layer)
comp.add_node(words_hidden_layer)
comp.add_node(task_input_layer)
comp.add_node(task_layer)
comp.add_node(response_layer)
# Add projections
comp.add_projection(task_input_weights, task_input_layer, task_layer)
# Color process (input -> hidden <-> response, bidirectional with response)
comp.add_projection(color_input_weights, colors_input_layer, colors_hidden_layer)
comp.add_projection(color_response_weights, colors_hidden_layer, response_layer)
comp.add_projection(response_color_weights, response_layer, colors_hidden_layer)
# Word process (input -> hidden <-> response, bidirectional with response)
comp.add_projection(word_input_weights, words_input_layer, words_hidden_layer)
comp.add_projection(word_response_weights, words_hidden_layer, response_layer)
comp.add_projection(response_word_weights, response_layer, words_hidden_layer)
# Color task process (task <-> color hidden, bidirectional)
comp.add_projection(task_color_weights, task_layer, colors_hidden_layer)
comp.add_projection(color_task_weights, colors_hidden_layer, task_layer)
# Word task process (task <-> word hidden, bidirectional)
comp.add_projection(task_word_weights, task_layer, words_hidden_layer)
comp.add_projection(word_task_weights, words_hidden_layer, task_layer)
def trial_dict(red_color, green_color, neutral_color, red_word, green_word, neutral_word, CN, WR):
    """Bundles stimulus values into the input dict expected by comp.run.

    Keys are the three input mechanisms; values are their input vectors:
    colors [red, green, neutral], words [RED, GREEN, NEUTRAL], task [CN, WR].
    """
    return {
        colors_input_layer: [red_color, green_color, neutral_color],
        words_input_layer: [red_word, green_word, neutral_word],
        task_input_layer: [CN, WR],
    }
# Define initialization trials separately.
# Argument order: red_color, green_color, neutral_color,
#                 red_word, green_word, neutral_word, CN, WR
CN_trial_initialize_input = trial_dict(0, 0, 0, 0, 0, 0, 1, 0)  # color-naming task, no stimulus
CN_incongruent_trial_input = trial_dict(1, 0, 0, 0, 1, 0, 1, 0)  # red color + GREEN word
CN_congruent_trial_input = trial_dict(1, 0, 0, 1, 0, 0, 1, 0)  # red color + RED word
CN_control_trial_input = trial_dict(1, 0, 0, 0, 0, 1, 1, 0)  # red color + NEUTRAL word
# Each condition is [initialization input, trial input].
Stimulus = [[CN_trial_initialize_input, CN_congruent_trial_input],
            [CN_trial_initialize_input, CN_incongruent_trial_input],
            [CN_trial_initialize_input, CN_control_trial_input]]
# should be 500 and 1000 -- reduced here, presumably for a quick run (confirm)
ntrials0 = 5
ntrials = 10
comp._analyze_graph()
comp.show_graph()
def run(bin_execute):
    """Runs every condition in Stimulus and collects the composition results.

    For each condition: run the initialization trials, then the test trials,
    reset all recurrent layers and the composition, and save a copy of the
    accumulated results before clearing them for the next condition.
    """
    all_results = []
    for init_input, trial_input in Stimulus:
        # Initialization run followed by the actual trial run.
        comp.run(inputs=init_input, num_trials=ntrials0, bin_execute=bin_execute)
        comp.run(inputs=trial_input, num_trials=ntrials, bin_execute=bin_execute)
        # Reset every recurrent layer to its zero state for the next condition.
        for layer, zero_state in ((colors_hidden_layer, [[0, 0, 0]]),
                                  (words_hidden_layer, [[0, 0, 0]]),
                                  (response_layer, [[0, 0]]),
                                  (task_layer, [[0, 0]])):
            layer.reinitialize(zero_state, context=comp)
        # Comp results include concatenation of both the above runs.
        all_results.append(comp.results.copy())
        comp.reinitialize()
        comp.results = []
    return all_results
| [
"numpy.array",
"psyneulink.Logistic",
"psyneulink.Composition",
"psyneulink.TransferMechanism"
] | [((67, 138), 'psyneulink.TransferMechanism', 'pnl.TransferMechanism', ([], {'size': '(3)', 'function': 'pnl.Linear', 'name': '"""COLORS_INPUT"""'}), "(size=3, function=pnl.Linear, name='COLORS_INPUT')\n", (88, 138), True, 'import psyneulink as pnl\n'), ((246, 316), 'psyneulink.TransferMechanism', 'pnl.TransferMechanism', ([], {'size': '(3)', 'function': 'pnl.Linear', 'name': '"""WORDS_INPUT"""'}), "(size=3, function=pnl.Linear, name='WORDS_INPUT')\n", (267, 316), True, 'import psyneulink as pnl\n'), ((421, 490), 'psyneulink.TransferMechanism', 'pnl.TransferMechanism', ([], {'size': '(2)', 'function': 'pnl.Linear', 'name': '"""TASK_INPUT"""'}), "(size=2, function=pnl.Linear, name='TASK_INPUT')\n", (442, 490), True, 'import psyneulink as pnl\n'), ((5601, 5618), 'psyneulink.Composition', 'pnl.Composition', ([], {}), '()\n', (5616, 5618), True, 'import psyneulink as pnl\n'), ((738, 752), 'psyneulink.Logistic', 'pnl.Logistic', ([], {}), '()\n', (750, 752), True, 'import psyneulink as pnl\n'), ((1215, 1236), 'psyneulink.Logistic', 'pnl.Logistic', ([], {'x_0': '(4.0)'}), '(x_0=4.0)\n', (1227, 1236), True, 'import psyneulink as pnl\n'), ((1739, 1760), 'psyneulink.Logistic', 'pnl.Logistic', ([], {'x_0': '(4.0)'}), '(x_0=4.0)\n', (1751, 1760), True, 'import psyneulink as pnl\n'), ((2209, 2223), 'psyneulink.Logistic', 'pnl.Logistic', ([], {}), '()\n', (2221, 2223), True, 'import psyneulink as pnl\n'), ((3462, 3523), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (3470, 3523), True, 'import numpy as np\n'), ((3698, 3759), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (3706, 3759), True, 'import numpy as np\n'), ((3932, 3966), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (3940, 3966), True, 'import numpy as np\n'), ((4080, 
4126), 'numpy.array', 'np.array', (['[[4.0, 0.0], [4.0, 0.0], [4.0, 0.0]]'], {}), '([[4.0, 0.0], [4.0, 0.0], [4.0, 0.0]])\n', (4088, 4126), True, 'import numpy as np\n'), ((4302, 4346), 'numpy.array', 'np.array', (['[[4.0, 4.0, 4.0], [0.0, 0.0, 0.0]]'], {}), '([[4.0, 4.0, 4.0], [0.0, 0.0, 0.0]])\n', (4310, 4346), True, 'import numpy as np\n'), ((4464, 4508), 'numpy.array', 'np.array', (['[[1.5, 0.0, 0.0], [0.0, 1.5, 0.0]]'], {}), '([[1.5, 0.0, 0.0], [0.0, 1.5, 0.0]])\n', (4472, 4508), True, 'import numpy as np\n'), ((4629, 4673), 'numpy.array', 'np.array', (['[[2.5, 0.0, 0.0], [0.0, 2.5, 0.0]]'], {}), '([[2.5, 0.0, 0.0], [0.0, 2.5, 0.0]])\n', (4637, 4673), True, 'import numpy as np\n'), ((4794, 4840), 'numpy.array', 'np.array', (['[[1.5, 0.0], [0.0, 1.5], [0.0, 0.0]]'], {}), '([[1.5, 0.0], [0.0, 1.5], [0.0, 0.0]])\n', (4802, 4840), True, 'import numpy as np\n'), ((5025, 5071), 'numpy.array', 'np.array', (['[[2.5, 0.0], [0.0, 2.5], [0.0, 0.0]]'], {}), '([[2.5, 0.0], [0.0, 2.5], [0.0, 0.0]])\n', (5033, 5071), True, 'import numpy as np\n'), ((5251, 5297), 'numpy.array', 'np.array', (['[[0.0, 4.0], [0.0, 4.0], [0.0, 4.0]]'], {}), '([[0.0, 4.0], [0.0, 4.0], [0.0, 4.0]])\n', (5259, 5297), True, 'import numpy as np\n'), ((5467, 5511), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [4.0, 4.0, 4.0]]'], {}), '([[0.0, 0.0, 0.0], [4.0, 4.0, 4.0]])\n', (5475, 5511), True, 'import numpy as np\n'), ((2887, 2907), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2895, 2907), True, 'import numpy as np\n'), ((3087, 3123), 'numpy.array', 'np.array', (['[[0.0, -4.0], [-4.0, 0.0]]'], {}), '([[0.0, -4.0], [-4.0, 0.0]])\n', (3095, 3123), True, 'import numpy as np\n')] |
import os
import sys
try: import commands
except: pass
import numpy as np
import time
import math
import multiprocessing as mp
import itertools
nn = "\n"
tt = "\t"
ss = "/"
cc = ","
def main():
    """Script entry point: run the subnetwork identification pipeline."""
    #[1] Constructing the object runs the whole pipeline (see its __init__).
    identifier = SubnetworkIdentifier()
    ##End main
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score
from sklearn.mixture import GaussianMixture
class SubnetworkIdentifier(object):
    def __init__(self):
        """Parse command-line arguments and run the full pipeline.

        Expected argv: <geneset_path> <ppi_path> <output_dir>.
        """
        #[1] Command-line inputs.
        self.geneset_path = sys.argv[1]
        self.ppi_path = sys.argv[2]
        self.output_dir = sys.argv[3]
        #[2] Load the protein-protein interaction edge list.
        self.retrieve_interactions()
        #[3] Cluster each gene set into subnetworks.
        self.identify_subnetworks()
        ##End init
def retrieve_interactions(self):
#[1]
self.ppi_set = set([])
#[2]
for ppi_line in open(self.ppi_path):
a, b = ppi_line.split()
key = tuple(sorted([a, b]))
self.ppi_set.add(key)
##End for
##End retrieve_interactions
def identify_subnetworks(self):
#[1]
self.subnetworks_dir = self.output_dir + ss + "subnetworks.dir"
make_path(self.subnetworks_dir)
self.output_path = self.output_dir + ss + "subnetwork_list.txt"
self.output_file = open(self.output_path, 'w')
#[2]
for geneset_line in open(self.geneset_path):
geneset_id = geneset_line.strip().split(tt)[0]
gene_id_list = geneset_line.strip().split(tt)[1:]
geneset_obj = Geneset()
geneset_obj.id = geneset_id
geneset_obj.gene_id_list = sorted(gene_id_list)
self.identify_subnetworks_sub(geneset_obj)
##End for
#[3]
self.output_file.close()
##End identify_subnetworks
def identify_subnetworks_sub(self, geneset_obj):
#[1]
edge_path = self.subnetworks_dir + ss + "%s_edges.txt"%geneset_obj.id
edge_file = open(edge_path, 'w')
#[2]
self.gene_dic = {}
for i, gene_id in enumerate(geneset_obj.gene_id_list):
gene_obj = Gene()
gene_obj.id = gene_id
gene_obj.index = i+1
self.gene_dic[gene_id] = gene_obj
self.gene_dic[gene_obj.index] = gene_obj
##End for
#[2]
for a, b in itertools.combinations(geneset_obj.gene_id_list, 2):
#[2-1]
key = a, b
if key not in self.ppi_set:
continue
##End if
#[2-2]
c, d = [self.gene_dic[x] for x in [a, b]]
edge_line = make_line([c.index, d.index], tt)
edge_file.write(edge_line + nn)
##End for
#[3]
edge_file.close()
embedding_path = self.subnetworks_dir + ss + "%s_embeddings.txt"%geneset_obj.id
log_path = self.subnetworks_dir + ss + "%s_log.txt"%geneset_obj.id
cmd = "nohup deepwalk --input %s --output %s --representation-size 8 --seed 0 > %s"%(edge_path, embedding_path, log_path)
os.system(cmd)
#[4]
embedding_file = open(embedding_path, 'r')
embedding_file.readline()
gene_id_list = []
gene_embedding_arr = []
for embedding_line in embedding_file:
index = int(embedding_line.split()[0])
gene_id = self.gene_dic[index].id
embedding_vec = [float(x) for x in embedding_line.split()[1:]]
gene_id_list.append(gene_id)
gene_embedding_arr.append(embedding_vec)
##End for
#[5]
gene_embedding_arr = np.array(gene_embedding_arr)
clusterer_list = []
max_clusters = int(len(gene_id_list)**0.5)+1
for n_clusters in range(2, max_clusters):
clusterer_obj = GaussianMixture(n_components=n_clusters, random_state=0)
clusterer_obj.fit(gene_embedding_arr)
clusterer_obj.score_ = -clusterer_obj.bic(gene_embedding_arr)
clusterer_obj.n_clusters_ = n_clusters
clusterer_obj.labels_ = clusterer_obj.predict(gene_embedding_arr)
clusterer_list.append(clusterer_obj)
##End for
#[6]
clusterer_obj = sorted(clusterer_list, key=lambda x:x.score_, reverse=True)[0]
subnet_dic = {}
for gene_id, label in zip(gene_id_list, clusterer_obj.labels_):
#[6-1]
if label not in subnet_dic:
subnet_obj = Subnet()
subnet_obj.gene_id_list = []
subnet_dic[label] = subnet_obj
##End if
#[6-2]
subnet_obj = subnet_dic[label]
subnet_obj.gene_id_list.append(gene_id)
##End for
#[7]
subnet_list = sorted(subnet_dic.values(), key=lambda x:len(x.gene_id_list), reverse=True)
subnet_list = list(filter(lambda x:len(x.gene_id_list)>2, subnet_list))
for i, subnet_obj in enumerate(subnet_list):
subnet_obj.id = "%s_%s"%(geneset_obj.id, i+1)
gene_id_line = make_line(sorted(subnet_obj.gene_id_list), cc)
subnet_line = make_line([subnet_obj.id, gene_id_line], tt)
self.output_file.write(subnet_line + nn)
##End for
##End identify_subnetworks_sub
##End SubnetworkIdentifier
class Subnet(object):
        """Mutable record for one sub-network; ``id`` and ``gene_id_list`` are set by callers."""
        def __init__(self):
                # Attributes are attached dynamically by SubnetworkIdentifier.
                pass
##End Subnet
class Gene(object):
        """Mutable record for one gene; ``id`` and 1-based ``index`` are set by callers."""
        def __init__(self):
                # Attributes are attached dynamically by SubnetworkIdentifier.
                pass
##End Gene
class Geneset(object):
        """Mutable record for one gene set; ``id`` and ``gene_id_list`` are set by callers."""
        def __init__(self):
                # Attributes are attached dynamically by SubnetworkIdentifier.
                pass
##End Geneset
def remove_path(path):
        """Recursively delete *path* if it exists; no-op (returns None) otherwise."""
        if os.path.exists(path):
                # Python 2 uses the `commands` module; on Python 3 `commands` is
                # undefined (the try-import at the top failed), so the NameError
                # falls through to the os.system fallback.
                try: return commands.getoutput("rm -r %s"%path)
                except: return os.system("rm -r %s"%path)
        ##End if
##End makePath
def make_path(path):
        """Create directory *path* (including parents) if it does not exist.

        Returns the command output (Python 2 ``commands`` path) or the
        ``os.system`` exit status (Python 3 fallback); returns None when the
        path already exists.
        """
        if not os.path.exists(path):
                # Fixed: the Python 2 branch referenced an undefined name `apth`
                # (typo for `path`) and lacked `-p`, unlike the fallback below.
                # On Python 3 `commands` is undefined, so the NameError falls
                # through to the os.system fallback.
                try: return commands.getoutput("mkdir -p %s"%path)
                except: return os.system("mkdir -p %s"%path)
        ##End if
##End makePath
def make_line(token_list, sep):
        """Return the tokens stringified and joined with *sep*."""
        return sep.join(str(token) for token in token_list)
##End makeLine
# Script entry point: run the pipeline, then terminate explicitly.
if __name__ == "__main__" :
        main()
        sys.exit()
##End if
| [
"os.path.exists",
"os.system",
"sklearn.mixture.GaussianMixture",
"itertools.combinations",
"numpy.array",
"sys.exit",
"commands.getoutput"
] | [((4712, 4732), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4726, 4732), False, 'import os\n'), ((5159, 5169), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5167, 5169), False, 'import sys\n'), ((1990, 2041), 'itertools.combinations', 'itertools.combinations', (['geneset_obj.gene_id_list', '(2)'], {}), '(geneset_obj.gene_id_list, 2)\n', (2012, 2041), False, 'import itertools\n'), ((2581, 2595), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2590, 2595), False, 'import os\n'), ((3028, 3056), 'numpy.array', 'np.array', (['gene_embedding_arr'], {}), '(gene_embedding_arr)\n', (3036, 3056), True, 'import numpy as np\n'), ((4885, 4905), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4899, 4905), False, 'import os\n'), ((3189, 3245), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n_clusters', 'random_state': '(0)'}), '(n_components=n_clusters, random_state=0)\n', (3204, 3245), False, 'from sklearn.mixture import GaussianMixture\n'), ((4748, 4785), 'commands.getoutput', 'commands.getoutput', (["('rm -r %s' % path)"], {}), "('rm -r %s' % path)\n", (4766, 4785), False, 'import commands\n'), ((4921, 4958), 'commands.getoutput', 'commands.getoutput', (["('mkdir %s' % apth)"], {}), "('mkdir %s' % apth)\n", (4939, 4958), False, 'import commands\n'), ((4801, 4829), 'os.system', 'os.system', (["('rm -r %s' % path)"], {}), "('rm -r %s' % path)\n", (4810, 4829), False, 'import os\n'), ((4974, 5005), 'os.system', 'os.system', (["('mkdir -p %s' % path)"], {}), "('mkdir -p %s' % path)\n", (4983, 5005), False, 'import os\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import time
import os
import numpy as np
from tqdm.auto import tqdm, trange
from collections import OrderedDict
import warnings
# Lookup tables for the architecture search space.  Each decoded bit-field
# (see conv_binary_decoder) indexes one of these; the trailing '#k' comments
# record how many bits encode each choice.
NUM_CONV_LAYERS = [1,2,3,4] #2
# NUM_INIT_CHANNELS = [16,32,64,128] #2
NUM_INIT_CHANNELS = np.arange(8,136).astype(int).tolist() #7
FCNN_LAYERS_DEPTH = [1,2,3,4,5,6,7,8] #3
# FCNN_LAYERS_WIDTH = [32,64,128,256,512,1024,2048,4096] #3
FCNN_LAYERS_WIDTH = np.arange(32,2080).astype(int).tolist() #11
POOLING_TYPE = ['max', 'average']
# FCNN LOG DROPOUT RATE -3 ~ 0
# FCNN LOG DROPOUT RATE -3 ~ 0
def conv_binary_decoder(X):
    """Decode a real-valued search vector into a CNN hyperparameter dict.

    Layout of ``X``: 24 "bits" (entries >= 0.5 count as 1) followed by one
    real value, the log10 dropout rate.  Bit fields, left to right:
    2 bits -> NUM_CONV_LAYERS, 7 -> NUM_INIT_CHANNELS,
    3 -> FCNN_LAYERS_DEPTH, 11 -> FCNN_LAYERS_WIDTH, 1 -> POOLING_TYPE.

    Returns a dict with keys: num_conv_layers, num_init_channels,
    fcnn_layers_depth, fcnn_layers_width, pooling_type, dropout_rate.
    A dropout rate above 1.0 is warned about and clamped to 1.0.
    """
    log_dropout = X[-1]
    dropout_rate = np.power(10, log_dropout)
    if dropout_rate > 1.0:
        warnings.warn("Dropout rate larger than 1.0, P_drop = "+str(dropout_rate))
        dropout_rate = 1.0
    binary_code = (X[:-1] >= 0.5).astype(int)
    # (config key, bit width, lookup table) -- replaces five copies of the
    # same decode idiom; the previously unused `bits_log_dropout` is dropped.
    field_specs = [
        ('num_conv_layers', 2, NUM_CONV_LAYERS),
        ('num_init_channels', 7, NUM_INIT_CHANNELS),
        ('fcnn_layers_depth', 3, FCNN_LAYERS_DEPTH),
        ('fcnn_layers_width', 11, FCNN_LAYERS_WIDTH),
        ('pooling_type', 1, POOLING_TYPE),
    ]
    conv_config = {}
    curr = 0
    for name, width, table in field_specs:
        # Interpret the field's bits as an unsigned integer index.
        bits = ''.join(str(b) for b in binary_code[curr: curr+width].tolist())
        conv_config[name] = table[int(bits, 2)]
        curr += width
    conv_config['dropout_rate'] = dropout_rate
    return conv_config
def conv_binary_decoder_v2(X):
    """Like ``conv_binary_decoder`` but also accepts a 2-D ``(1, D)`` input.

    A 2-D ``X`` is squeezed to 1-D first; decoding is then delegated to
    ``conv_binary_decoder`` so the two functions cannot drift apart
    (previously the entire decoder body was duplicated here).
    """
    if X.ndim == 2:
        X = np.squeeze(X)
    return conv_binary_decoder(X)
class ConvNet(nn.Module):
    """Configurable CNN: a conv/pool stack followed by a fully connected head.

    Built from a ``conv_config`` dict as produced by ``conv_binary_decoder``.
    NOTE(review): the hard-coded 32 below implies a 32x32, 3-channel input
    (e.g. CIFAR) and the trailing 10 implies 10 output classes -- confirm
    against the training pipeline.
    """
    def __init__(self, conv_config):
        super(ConvNet, self).__init__()
        num_conv_layers = conv_config['num_conv_layers']
        init_filter_channels = conv_config['num_init_channels']
        pooling_type = conv_config['pooling_type']
        fcnn_layer_width = conv_config['fcnn_layers_width']
        fcnn_layer_depth = conv_config['fcnn_layers_depth']
        dropout_rate = conv_config['dropout_rate']
        # Channel count doubles at every conv layer; each 2x2 pool halves the
        # spatial size, so the flattened FC input is channels * dims * dims.
        channels = init_filter_channels*np.power(2, np.arange(num_conv_layers)).astype(int)
        conv_layers_output_dims = int(32/np.power(2,num_conv_layers))
        fcnn_input_dims = channels[-1]*conv_layers_output_dims*conv_layers_output_dims
        self.conv_channels = [3] + channels.tolist()
        self.fcnn_layers = [fcnn_input_dims] + [fcnn_layer_width]*fcnn_layer_depth + [10]
        #print(self.fcnn_layers)
        # Build the conv stack as an ordered dict so layer names are stable.
        sequential_dict_conv = OrderedDict()
        for i_conv in range(num_conv_layers):
            in_channels = self.conv_channels[i_conv]
            out_channels = self.conv_channels[i_conv+1]
            #print(in_channels, out_channels)
            conv_key = 'conv'+str(i_conv+1)
            act_key = 'act_conv'+str(i_conv+1)
            pool_key = 'pool'+str(i_conv+1)
            # 3x3 conv with padding=1 preserves spatial size; pooling halves it.
            sequential_dict_conv[conv_key] = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
            sequential_dict_conv[act_key] = nn.ReLU()
            if pooling_type == 'max':
                sequential_dict_conv[pool_key] = nn.MaxPool2d(2, 2)
            elif pooling_type == 'average':
                sequential_dict_conv[pool_key] = nn.AvgPool2d(2, 2)
            else:
                raise Exception('Error: Unrecognized pooling type!')
            #
        #
        self.sequential_conv_layers = nn.Sequential(sequential_dict_conv)
        # Fully connected head: (Linear -> Dropout -> ReLU) x depth, then a
        # final Linear to the output layer with no activation/dropout.
        sequential_dict_fcnn = OrderedDict()
        num_fcnn_layers = len(self.fcnn_layers)
        for i_layer in range(num_fcnn_layers-2):
            in_d = self.fcnn_layers[i_layer]
            out_d = self.fcnn_layers[i_layer+1]
            linear_key = 'linear'+str(i_layer+1)
            dropout_key = 'dropout'+str(i_layer+1)
            act_key = 'act_linear'+str(i_layer+1)
            sequential_dict_fcnn[linear_key] = nn.Linear(in_d, out_d)
            sequential_dict_fcnn[dropout_key] = nn.Dropout(p=dropout_rate)
            sequential_dict_fcnn[act_key] = nn.ReLU()
            #
        linear_key = 'linear'+str(num_fcnn_layers-1)
        sequential_dict_fcnn[linear_key] = nn.Linear(self.fcnn_layers[-2], self.fcnn_layers[-1])
        self.sequential_fcnn_layers = nn.Sequential(sequential_dict_fcnn)
    def forward(self, X):
        """Run the conv stack, flatten, and return the head's raw logits."""
        H = self.sequential_conv_layers(X)
        H = H.view([-1, self.fcnn_layers[0]])
        y = self.sequential_fcnn_layers(H)
        return y
# def eval_conv_net_performance(domain, binary_config, max_epochs, mode, device):
# if binary_config.ndim == 2:
# binary_config = binary_config.squeeze()
# conv_config = conv_binary_decoder(binary_config)
# #print(conv_config)
# conv_net = ConvNet(conv_config).to(device)
# #print(conv_net)
# optimizer = optim.SGD(conv_net.parameters(),lr=1e-3,momentum = 0.9,weight_decay=1e-5)
# criterion = nn.CrossEntropyLoss()
# hist_scores = []
# for epoch in trange(max_epochs): # loop over the dataset multiple times
# for i, data in enumerate(domain.train_loader, 0):
# inputs, labels = data[0].to(device), data[1].to(device)
# optimizer.zero_grad()
# preds = conv_net(inputs)
# loss = criterion(preds, labels)
# loss.backward()
# optimizer.step()
# #
# if mode == 'generate':
# # train_acc = domain.metric(conv_net, device, score_type='train_pred_acc')
# # test_acc = domain.metric(conv_net, device, score_type='test_pred_acc')
# nll = domain.metric(conv_net, device, score_type='log_loss')
# hist_scores.append(nll)
# #print('%d-th epoch, train_acc=%.4f, test_acc=%.4f, test_log_loss=%.4f' % (epoch, train_acc, test_acc, nll))
# #
# #
# if mode == 'query':
# score = domain.metric(conv_net, device, score_type='log_loss')
# return score
# elif mode == 'generate':
# return np.array(hist_scores)
# #
def eval_conv_net_performance(domain, binary_config, max_epochs, mode, device):
    """Train a decoded ConvNet with SGD and report its score.

    Parameters: `domain` supplies `train_loader` and a `metric(net, device,
    score_type=...)` callable; `binary_config` is a search vector for
    `conv_binary_decoder`; `mode` is 'query' (return the final score) or
    'generate' (return the per-epoch score history, padded to max_epochs
    by repeating the last score when early stopping triggers).
    """
    if binary_config.ndim == 2:
        binary_config = binary_config.squeeze()
    conv_config = conv_binary_decoder(binary_config)
    #print(conv_config)
    conv_net = ConvNet(conv_config).to(device)
    #print(conv_net)
    optimizer = optim.SGD(conv_net.parameters(),lr=1e-3,momentum = 0.9,weight_decay=1e-5)
    criterion = nn.CrossEntropyLoss()
    hist_scores = []
    early_stop_cnt = 0
    for epoch in range(max_epochs): # loop over the dataset multiple times
        for i, data in enumerate(domain.train_loader, 0):
            inputs, labels = data[0].to(device), data[1].to(device)
            optimizer.zero_grad()
            preds = conv_net(inputs)
            loss = criterion(preds, labels)
            loss.backward()
            optimizer.step()
        #
        # Early stopping: stop after 5 consecutive epochs with no improvement
        # over the running best.  The comparison treats LARGER scores as
        # better -- NOTE(review): the score_type is named 'log_loss', so
        # confirm domain.metric returns a larger-is-better value (e.g. the
        # negated loss) rather than the raw loss.
        score = domain.metric(conv_net, device, score_type='log_loss')
        if epoch == 0:
            hist_best = -np.inf
        else:
            hist_best = np.max(np.array(hist_scores))
        #
        if score <= hist_best:
            early_stop_cnt += 1
        else:
            early_stop_cnt = 0
        #
        hist_scores.append(score)
        if early_stop_cnt >= 5:
            break
        #
        # print(hist_best)
        # print(score)
        # print(early_stop_cnt)
        # print('')
    #
    if mode == 'query':
        return hist_scores[-1]
    elif mode == 'generate':
        # Pad the history so the returned array always has max_epochs entries.
        if len(hist_scores) < max_epochs:
            append_hist_scores = [hist_scores[-1]]*(max_epochs-len(hist_scores))
            hist_scores = hist_scores + append_hist_scores
        #
        return np.array(hist_scores)
    #
#
| [
"torch.nn.Dropout",
"torch.nn.ReLU",
"torch.nn.Sequential",
"torch.nn.MaxPool2d",
"numpy.power",
"torch.nn.Conv2d",
"torch.nn.CrossEntropyLoss",
"numpy.array",
"numpy.arange",
"torch.nn.Linear",
"numpy.squeeze",
"torch.nn.AvgPool2d",
"collections.OrderedDict"
] | [((725, 750), 'numpy.power', 'np.power', (['(10)', 'log_dropout'], {}), '(10, log_dropout)\n', (733, 750), True, 'import numpy as np\n'), ((2944, 2969), 'numpy.power', 'np.power', (['(10)', 'log_dropout'], {}), '(10, log_dropout)\n', (2952, 2969), True, 'import numpy as np\n'), ((9923, 9944), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (9942, 9944), True, 'import torch.nn as nn\n'), ((2880, 2893), 'numpy.squeeze', 'np.squeeze', (['X'], {}), '(X)\n', (2890, 2893), True, 'import numpy as np\n'), ((5955, 5968), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5966, 5968), False, 'from collections import OrderedDict\n'), ((6834, 6869), 'torch.nn.Sequential', 'nn.Sequential', (['sequential_dict_conv'], {}), '(sequential_dict_conv)\n', (6847, 6869), True, 'import torch.nn as nn\n'), ((6910, 6923), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6921, 6923), False, 'from collections import OrderedDict\n'), ((7578, 7631), 'torch.nn.Linear', 'nn.Linear', (['self.fcnn_layers[-2]', 'self.fcnn_layers[-1]'], {}), '(self.fcnn_layers[-2], self.fcnn_layers[-1])\n', (7587, 7631), True, 'import torch.nn as nn\n'), ((7679, 7714), 'torch.nn.Sequential', 'nn.Sequential', (['sequential_dict_fcnn'], {}), '(sequential_dict_fcnn)\n', (7692, 7714), True, 'import torch.nn as nn\n'), ((6350, 6412), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, out_channels, kernel_size=3, padding=1)\n', (6359, 6412), True, 'import torch.nn as nn\n'), ((6457, 6466), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6464, 6466), True, 'import torch.nn as nn\n'), ((7311, 7333), 'torch.nn.Linear', 'nn.Linear', (['in_d', 'out_d'], {}), '(in_d, out_d)\n', (7320, 7333), True, 'import torch.nn as nn\n'), ((7382, 7408), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_rate'}), '(p=dropout_rate)\n', (7392, 7408), True, 'import torch.nn as nn\n'), ((7453, 7462), 'torch.nn.ReLU', 
'nn.ReLU', ([], {}), '()\n', (7460, 7462), True, 'import torch.nn as nn\n'), ((11252, 11273), 'numpy.array', 'np.array', (['hist_scores'], {}), '(hist_scores)\n', (11260, 11273), True, 'import numpy as np\n'), ((380, 397), 'numpy.arange', 'np.arange', (['(8)', '(136)'], {}), '(8, 136)\n', (389, 397), True, 'import numpy as np\n'), ((542, 561), 'numpy.arange', 'np.arange', (['(32)', '(2080)'], {}), '(32, 2080)\n', (551, 561), True, 'import numpy as np\n'), ((5622, 5650), 'numpy.power', 'np.power', (['(2)', 'num_conv_layers'], {}), '(2, num_conv_layers)\n', (5630, 5650), True, 'import numpy as np\n'), ((6554, 6572), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (6566, 6572), True, 'import torch.nn as nn\n'), ((10570, 10591), 'numpy.array', 'np.array', (['hist_scores'], {}), '(hist_scores)\n', (10578, 10591), True, 'import numpy as np\n'), ((6666, 6684), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (6678, 6684), True, 'import torch.nn as nn\n'), ((5540, 5566), 'numpy.arange', 'np.arange', (['num_conv_layers'], {}), '(num_conv_layers)\n', (5549, 5566), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 25 18:04:49 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
import utils, utils_metric
utils.start(__file__)
#==============================================================================
# Run configuration for this experiment.
SUBMIT_FILE_PATH = '../output/1125-2.csv.gz'
COMMENT = 'top1000 features / each model has 100 features'
EXE_SUBMIT = True
# Randomize the seed per run, but log it so the run is reproducible.
SEED = np.random.randint(9999)
np.random.seed(SEED)
print('SEED:', SEED)
NFOLD = 5
# NOTE(review): LOOP is declared but does not appear to drive the CV loop
# below -- confirm whether repeated runs were intended.
LOOP = 3
# LightGBM parameters for 14-class classification.
param = {
         'objective': 'multiclass',
         'num_class': 14,
         'metric': 'multi_logloss',
         'learning_rate': 0.5,
         'max_depth': 3,
         'num_leaves': 63,
         'max_bin': 255,
         'min_child_weight': 10,
         'min_data_in_leaf': 100,
         'reg_lambda': 0.5,  # L2 regularization term on weights.
         'reg_alpha': 0.5,  # L1 regularization term on weights.
         'colsample_bytree': 0.5,
         'subsample': 0.7,
#         'nthread': 32,
         'nthread': cpu_count(),
         'bagging_freq': 1,
         'verbose':-1,
         }
# Take the top TOTAL_FEATURES features and train one model per
# EACH_FEATURES-sized chunk.
TOTAL_FEATURES = 1000
EACH_FEATURES = 100
# =============================================================================
# def
# =============================================================================
def split_list(l, n):
    """Yield consecutive sub-lists of ``l`` containing at most ``n`` items each.

    :param l: the list to split
    :param n: maximum number of elements per sub-list
    """
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
# =============================================================================
# load
# =============================================================================
# Select the top-ranked features from the stored importance file.
COL = pd.read_csv(utils.IMP_FILE_BEST).head(TOTAL_FEATURES).feature.tolist()
# Feature-file prefixes, used to locate the cached training pickles.
PREFS = sorted(set([c.split('_')[0] for c in COL]))

files_tr = []
for pref in PREFS:
    files_tr += glob(f'../data/train_{pref}*.pkl')

files_te = [f'../feature/test_{c}.pkl' for c in COL]

# Fail fast if any required test-feature pickle is missing.
sw = False
for i in files_te:
    if os.path.exists(i)==False:
        print(i)
        sw = True
if sw:
    raise Exception()

X = pd.concat([
                pd.read_pickle(f) for f in tqdm(files_tr, mininterval=60)
                ], axis=1)[COL]
y = utils.load_target().target

#X.drop(DROP, axis=1, inplace=True)

# Remap the raw class labels to contiguous integers 0..num_class-1.
target_dict = {}
target_dict_r = {}
for i,e in enumerate(y.sort_values().unique()):
    target_dict[e] = i
    target_dict_r[i] = e

y = y.replace(target_dict)

if X.columns.duplicated().sum()>0:
    raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')
print('no dup :) ')
print(f'X.shape {X.shape}')

gc.collect()

# Partition the selected columns into fixed-size feature subsets; one model
# ensemble is trained per subset.
feature_set = {}
for i,col in enumerate(split_list(COL, EACH_FEATURES)):
    feature_set[i] = col
    print('each feature size:', len(col))
# =============================================================================
# stacking
# =============================================================================
gc.collect()

# Train one cross-validated LightGBM ensemble per feature subset, keeping
# the CV models (model_set) and out-of-fold predictions (oofs).
model_set = {}
nround_mean = 0
wloss_list = []
oofs = []

for i in feature_set:
    dtrain = lgb.Dataset(X[feature_set[i]], y.values, #categorical_feature=CAT,
                 free_raw_data=False)
    gc.collect()
    # Re-seed per subset so each ensemble member differs.
    param['seed'] = np.random.randint(9999)
    ret, models = lgb.cv(param, dtrain, 99999, nfold=NFOLD,
                         fobj=utils_metric.wloss_objective,
                         feval=utils_metric.wloss_metric,
                         early_stopping_rounds=100, verbose_eval=50,
                         seed=SEED)
    y_pred = ex.eval_oob(X[feature_set[i]], y.values, models, SEED, stratified=True, shuffle=True,
                         n_class=True)
    oofs.append(y_pred)
    model_set[i] = models
    nround_mean += len(ret['wloss-mean'])
    wloss_list.append( ret['wloss-mean'][-1] )

result = f"CV wloss: {np.mean(wloss_list)} + {np.std(wloss_list)}"
print(result)

utils.send_line(result)
# =============================================================================
# test
# =============================================================================
X_test = pd.concat([
                pd.read_pickle(f) for f in tqdm(files_te, mininterval=10)
                ], axis=1)[COL]

gc.collect()

# Average the softmax test predictions over every CV model of every feature
# subset.  The accumulator starts at 0 and the models actually used are
# counted, so the final division is always correct.
# Fixes three bugs in the original loop: the inner loop iterated an
# undefined `model_all` (NameError; must be `models`); for i==0 every model
# OVERWROTE y_pred_all instead of accumulating, discarding all but the last
# model of the first subset; and the divisor referenced an undefined `MOD_N`.
y_pred_all = 0
n_models_used = 0
for i in feature_set:
    models = model_set[i]
    col = feature_set[i]
    for model in tqdm(models):
        gc.collect()
        y_pred = model.predict(X_test[col])
        y_pred_all += utils_metric.softmax(y_pred)
        n_models_used += 1

y_pred_all /= n_models_used

sub = pd.read_csv('../input/sample_submission.csv.zip')
df = pd.DataFrame(y_pred_all, columns=sub.columns[1:-1])
sub = pd.concat([sub[['object_id']], df], axis=1)

# class_99
sub.to_pickle(f'../data/y_pred_raw_{__file__}.pkl')
utils.postprocess(sub, method='oli')
#utils.postprocess(sub, weight=weight, method='giba')

print(sub.iloc[:, 1:].idxmax(1).value_counts(normalize=True))

sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')

png = f'LOG/sub_{__file__}.png'
utils.savefig_sub(sub, png)
utils.send_line('DONE!', png)
# =============================================================================
# submission
# =============================================================================
# Optionally submit the result, then tear down (and stop the cloud instance).
if EXE_SUBMIT:
    print('submit')
    utils.submit(SUBMIT_FILE_PATH, COMMENT)

#==============================================================================
utils.end(__file__)
utils.stop_instance()
| [
"utils.send_line",
"utils.stop_instance",
"utils.load_target",
"numpy.random.seed",
"pandas.read_csv",
"utils.postprocess",
"gc.collect",
"utils.start",
"numpy.random.randint",
"utils.savefig_sub",
"numpy.mean",
"glob.glob",
"multiprocessing.cpu_count",
"pandas.DataFrame",
"numpy.std",
... | [((399, 420), 'utils.start', 'utils.start', (['__file__'], {}), '(__file__)\n', (410, 420), False, 'import utils, utils_metric\n'), ((635, 658), 'numpy.random.randint', 'np.random.randint', (['(9999)'], {}), '(9999)\n', (652, 658), True, 'import numpy as np\n'), ((659, 679), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (673, 679), True, 'import numpy as np\n'), ((2812, 2824), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2822, 2824), False, 'import os, gc\n'), ((3139, 3151), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3149, 3151), False, 'import os, gc\n'), ((4066, 4089), 'utils.send_line', 'utils.send_line', (['result'], {}), '(result)\n', (4081, 4089), False, 'import utils, utils_metric\n'), ((4387, 4399), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4397, 4399), False, 'import os, gc\n'), ((4769, 4818), 'pandas.read_csv', 'pd.read_csv', (['"""../input/sample_submission.csv.zip"""'], {}), "('../input/sample_submission.csv.zip')\n", (4780, 4818), True, 'import pandas as pd\n'), ((4824, 4875), 'pandas.DataFrame', 'pd.DataFrame', (['y_pred_all'], {'columns': 'sub.columns[1:-1]'}), '(y_pred_all, columns=sub.columns[1:-1])\n', (4836, 4875), True, 'import pandas as pd\n'), ((4883, 4926), 'pandas.concat', 'pd.concat', (["[sub[['object_id']], df]"], {'axis': '(1)'}), "([sub[['object_id']], df], axis=1)\n", (4892, 4926), True, 'import pandas as pd\n'), ((4991, 5027), 'utils.postprocess', 'utils.postprocess', (['sub'], {'method': '"""oli"""'}), "(sub, method='oli')\n", (5008, 5027), False, 'import utils, utils_metric\n'), ((5243, 5270), 'utils.savefig_sub', 'utils.savefig_sub', (['sub', 'png'], {}), '(sub, png)\n', (5260, 5270), False, 'import utils, utils_metric\n'), ((5271, 5300), 'utils.send_line', 'utils.send_line', (['"""DONE!"""', 'png'], {}), "('DONE!', png)\n", (5286, 5300), False, 'import utils, utils_metric\n'), ((5635, 5654), 'utils.end', 'utils.end', (['__file__'], {}), '(__file__)\n', (5644, 5654), False, 'import utils, 
utils_metric\n'), ((5655, 5676), 'utils.stop_instance', 'utils.stop_instance', ([], {}), '()\n', (5674, 5676), False, 'import utils, utils_metric\n'), ((1273, 1284), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1282, 1284), False, 'from multiprocessing import cpu_count\n'), ((2085, 2119), 'glob.glob', 'glob', (['f"""../data/train_{pref}*.pkl"""'], {}), "(f'../data/train_{pref}*.pkl')\n", (2089, 2119), False, 'from glob import glob\n'), ((2427, 2446), 'utils.load_target', 'utils.load_target', ([], {}), '()\n', (2444, 2446), False, 'import utils, utils_metric\n'), ((3245, 3306), 'lightgbm.Dataset', 'lgb.Dataset', (['X[feature_set[i]]', 'y.values'], {'free_raw_data': '(False)'}), '(X[feature_set[i]], y.values, free_raw_data=False)\n', (3256, 3306), True, 'import lightgbm as lgb\n'), ((3364, 3376), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3374, 3376), False, 'import os, gc\n'), ((3397, 3420), 'numpy.random.randint', 'np.random.randint', (['(9999)'], {}), '(9999)\n', (3414, 3420), True, 'import numpy as np\n'), ((3439, 3611), 'lightgbm.cv', 'lgb.cv', (['param', 'dtrain', '(99999)'], {'nfold': 'NFOLD', 'fobj': 'utils_metric.wloss_objective', 'feval': 'utils_metric.wloss_metric', 'early_stopping_rounds': '(100)', 'verbose_eval': '(50)', 'seed': 'SEED'}), '(param, dtrain, 99999, nfold=NFOLD, fobj=utils_metric.wloss_objective,\n feval=utils_metric.wloss_metric, early_stopping_rounds=100,\n verbose_eval=50, seed=SEED)\n', (3445, 3611), True, 'import lightgbm as lgb\n'), ((3719, 3822), 'lgbextension.eval_oob', 'ex.eval_oob', (['X[feature_set[i]]', 'y.values', 'models', 'SEED'], {'stratified': '(True)', 'shuffle': '(True)', 'n_class': '(True)'}), '(X[feature_set[i]], y.values, models, SEED, stratified=True,\n shuffle=True, n_class=True)\n', (3730, 3822), True, 'import lgbextension as ex\n'), ((5514, 5553), 'utils.submit', 'utils.submit', (['SUBMIT_FILE_PATH', 'COMMENT'], {}), '(SUBMIT_FILE_PATH, COMMENT)\n', (5526, 5553), False, 'import utils, 
utils_metric\n'), ((2211, 2228), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (2225, 2228), False, 'import os, gc\n'), ((4007, 4026), 'numpy.mean', 'np.mean', (['wloss_list'], {}), '(wloss_list)\n', (4014, 4026), True, 'import numpy as np\n'), ((4031, 4049), 'numpy.std', 'np.std', (['wloss_list'], {}), '(wloss_list)\n', (4037, 4049), True, 'import numpy as np\n'), ((4503, 4518), 'tqdm.tqdm', 'tqdm', (['model_all'], {}), '(model_all)\n', (4507, 4518), False, 'from tqdm import tqdm\n'), ((4529, 4541), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4539, 4541), False, 'import os, gc\n'), ((4603, 4631), 'utils_metric.softmax', 'utils_metric.softmax', (['y_pred'], {}), '(y_pred)\n', (4623, 4631), False, 'import utils, utils_metric\n'), ((244, 266), 'os.environ.get', 'os.environ.get', (['"""USER"""'], {}), "('USER')\n", (258, 266), False, 'import os, gc\n'), ((2334, 2351), 'pandas.read_pickle', 'pd.read_pickle', (['f'], {}), '(f)\n', (2348, 2351), True, 'import pandas as pd\n'), ((4297, 4314), 'pandas.read_pickle', 'pd.read_pickle', (['f'], {}), '(f)\n', (4311, 4314), True, 'import pandas as pd\n'), ((2361, 2391), 'tqdm.tqdm', 'tqdm', (['files_tr'], {'mininterval': '(60)'}), '(files_tr, mininterval=60)\n', (2365, 2391), False, 'from tqdm import tqdm\n'), ((4324, 4354), 'tqdm.tqdm', 'tqdm', (['files_te'], {'mininterval': '(10)'}), '(files_te, mininterval=10)\n', (4328, 4354), False, 'from tqdm import tqdm\n'), ((1911, 1943), 'pandas.read_csv', 'pd.read_csv', (['utils.IMP_FILE_BEST'], {}), '(utils.IMP_FILE_BEST)\n', (1922, 1943), True, 'import pandas as pd\n')] |
from __future__ import print_function
import torch
from PIL import Image
import inspect
import re
import numpy as np
import os
import time
import collections
import torch.nn as nn
import sys
import math
from torch.nn.modules.module import _addindent
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8):
    """Convert the first image of a CHW batch tensor (values in [-1, 1])
    to an HWC numpy array scaled to [0, 255]."""
    arr = image_tensor[0].cpu().float().numpy()  # only the first image in a mini-batch
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(imtype)
def diagnose_network(net, name='network'):
    """Print *name* followed by the mean absolute gradient over all
    parameters of *net* that currently have a gradient (0.0 if none do)."""
    total = 0.0
    n_grads = 0
    for p in net.parameters():
        if p.grad is not None:
            total += torch.mean(torch.abs(p.grad.data))
            n_grads += 1
    if n_grads > 0:
        total = total / n_grads
    print(name)
    print(total)
def save_image(image_numpy, image_path):
    """Write an HWC uint8 numpy image to *image_path* via PIL."""
    Image.fromarray(image_numpy).save(image_path)
def info(object, spacing=10, collapse=1):
    """Print methods and doc strings.

    Takes module, class, list, dictionary, or string.  When *collapse* is
    truthy, each docstring is collapsed onto a single line.
    """
    # Fixed: `collections.Callable` was removed in Python 3.10; the builtin
    # callable() is the portable equivalent.
    methodList = [e for e in dir(object) if callable(getattr(object, e))]
    # Conditional expression instead of the fragile `and/or` ternary hack.
    processFunc = (lambda s: " ".join(s.split())) if collapse else (lambda s: s)
    print("\n".join(["%s %s" %
                     (method.ljust(spacing),
                      processFunc(str(getattr(object, method).__doc__)))
                     for method in methodList]))
def varname(p):
    """Best-effort: return the variable name used at the call site.

    Scans the caller's source line(s) for a ``varname(<identifier>)`` call
    and returns the identifier; returns None (implicitly) if no match is
    found.  NOTE(review): relies on the caller's source being available to
    ``inspect`` -- it will not work from a REPL or compiled-only code.
    """
    for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
        m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
        if m:
            return m.group(1)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics of a numpy array.

    *shp* additionally prints the shape; *val* prints mean/min/max/median/std
    of the flattened array (cast to float64 first).
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        stats = (np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat))
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % stats)
def mkdirs(paths):
    """Create every directory in *paths* (a list of paths, or one path string)."""
    if isinstance(paths, list) and not isinstance(paths, str):
        for one_path in paths:
            mkdir(one_path)
    else:
        mkdir(paths)


def mkdir(path):
    """Create *path* (including parents) unless it already exists."""
    if not os.path.exists(path):
        os.makedirs(path)
def _process(ids):
str_ids = ids.split(',')
gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
gpu_ids.append(id)
return gpu_ids
def weights_init(m):
    """
    Initialize random weights for a module; intended for use with
    ``net.apply(weights_init)``.

    Conv layers get normal weights with std sqrt(2/n), n = k*k*out_channels
    (the He/Kaiming fan_out formula); norm layers get weight=1, bias=0;
    linear layers get normal weights with std 0.05 and zero bias.
    """
    # if isinstance(m, nn.Conv2d):
    #     nn.init.xavier_normal(m.weight.data)
    #     m.bias.data.zero_()
    if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
        # NOTE(review): biases are drawn from the same normal distribution,
        # not zeroed -- unusual; confirm this is intentional.
        m.bias.data.normal_(0, math.sqrt(2. / n))
    elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        # print('linear layer!')
        m.weight.data.normal_(std=0.05)
        m.bias.data.zero_()
def str2bool(v):
    """Interpret common truthy strings ('yes', 'true', 't', '1', any case) as True."""
    return v.lower() in {"yes", "true", "t", "1"}
class Timer(object):
    """A simple tic/toc timer that tracks per-call and average elapsed time."""

    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        """Start (or restart) the timer."""
        # time.time, not time.clock: clock does not normalize for multithreading.
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop the timer; return the average elapsed time, or the last
        interval when *average* is False."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
def remove(file_name):
try:
os.remove(file_name)
except:
pass
def print_log(msg, file=None, init=False):
print(msg)
if file is None:
pass
else:
if init:
remove(file)
with open(file, 'a') as log_file:
log_file.write('%s\n' % msg)
def show_jot_opt(opt):
"""
by Hongyang.
A starter for logging the training/test process
"""
if opt.phase == 'test':
file_name = os.path.join(opt.save_folder, 'opt_{:s}.txt'.format(opt.phase))
elif opt.phase == 'train':
file_name = os.path.join(opt.save_folder,
'opt_{:s}_START_epoch_{:d}_iter_{:d}_END_{:d}.txt'.format(
opt.phase, opt.start_epoch, opt.start_iter, opt.max_epoch))
elif opt.phase == 'train_val':
file_name = os.path.join(opt.save_folder,
'opt_{:s}_START_epoch_0_END_{:d}.txt'.format(
opt.phase, opt.max_epoch))
opt.file_name = file_name
args = vars(opt)
print_log('Experiment: {:s}'.format(opt.experiment_name), file_name, init=True)
if opt.phase == 'train':
print_log('------------ Training Options -------------', file_name)
elif opt.phase == 'test':
print_log('------------ Test Options -----------------', file_name)
elif opt.phase == 'train_val':
print_log('------------ Train and Test Options -----------------', file_name)
for k, v in sorted(args.items()):
print_log('%s: %s' % (str(k), str(v)), file_name)
print_log('------------------ End --------------------', file_name)
return opt
def torch_summarize(model, show_weights=True, show_parameters=True):
"""Summarizes torch model by showing trainable parameters and weights."""
tmpstr = model.__class__.__name__ + ' (\n'
params_num = 0
for key, module in model._modules.items():
# if it contains layers let call it recursively to get params and weights
if type(module) in [
torch.nn.modules.container.Container,
torch.nn.modules.container.Sequential
]:
modstr = torch_summarize(module)
else:
modstr = module.__repr__()
if isinstance(modstr, str):
modstr = _addindent(modstr, 2)
elif isinstance(modstr, tuple):
modstr = _addindent(modstr[0], 2)
params = sum([np.prod(p.size()) for p in module.parameters()])
weights = tuple([tuple(p.size()) for p in module.parameters()])
# rest = b if b > 0 else params
# params_num = params_num + rest
params_num += params
tmpstr += ' (' + key + '): ' + modstr
if show_weights:
tmpstr += ', weights={}'.format(weights)
if show_parameters:
tmpstr += ', parameters={}'.format(params)
tmpstr += '\n'
tmpstr = tmpstr + ')'
return tmpstr, params_num * 4. / (1024**2)
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count | [
"os.remove",
"os.makedirs",
"math.sqrt",
"numpy.median",
"numpy.std",
"os.path.exists",
"torch.nn.modules.module._addindent",
"numpy.transpose",
"time.time",
"torch.abs",
"numpy.min",
"numpy.mean",
"numpy.max",
"inspect.currentframe",
"PIL.Image.fromarray",
"re.search"
] | [((1090, 1118), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (1105, 1118), False, 'from PIL import Image\n'), ((1767, 1838), 're.search', 're.search', (['"""\\\\bvarname\\\\s*\\\\(\\\\s*([A-Za-z_][A-Za-z0-9_]*)\\\\s*\\\\)"""', 'line'], {}), "('\\\\bvarname\\\\s*\\\\(\\\\s*([A-Za-z_][A-Za-z0-9_]*)\\\\s*\\\\)', line)\n", (1776, 1838), False, 'import re\n'), ((2385, 2405), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2399, 2405), False, 'import os\n'), ((2415, 2432), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2426, 2432), False, 'import os\n'), ((3752, 3763), 'time.time', 'time.time', ([], {}), '()\n', (3761, 3763), False, 'import time\n'), ((4108, 4128), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (4117, 4128), False, 'import os\n'), ((2969, 2987), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (2978, 2987), False, 'import math\n'), ((3019, 3037), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (3028, 3037), False, 'import math\n'), ((3818, 3829), 'time.time', 'time.time', ([], {}), '()\n', (3827, 3829), False, 'import time\n'), ((6406, 6427), 'torch.nn.modules.module._addindent', '_addindent', (['modstr', '(2)'], {}), '(modstr, 2)\n', (6416, 6427), False, 'from torch.nn.modules.module import _addindent\n'), ((633, 669), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (645, 669), True, 'import numpy as np\n'), ((902, 928), 'torch.abs', 'torch.abs', (['param.grad.data'], {}), '(param.grad.data)\n', (911, 928), False, 'import torch\n'), ((1720, 1742), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1740, 1742), False, 'import inspect\n'), ((6489, 6513), 'torch.nn.modules.module._addindent', '_addindent', (['modstr[0]', '(2)'], {}), '(modstr[0], 2)\n', (6499, 6513), False, 'from torch.nn.modules.module import _addindent\n'), ((2129, 2139), 'numpy.mean', 'np.mean', (['x'], 
{}), '(x)\n', (2136, 2139), True, 'import numpy as np\n'), ((2141, 2150), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (2147, 2150), True, 'import numpy as np\n'), ((2152, 2161), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (2158, 2161), True, 'import numpy as np\n'), ((2163, 2175), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (2172, 2175), True, 'import numpy as np\n'), ((2177, 2186), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (2183, 2186), True, 'import numpy as np\n')] |
"""
Functions to convert between local enu, local llh, and global xyz coordinates
Translated from Matlab
"""
import numpy as np
from . import datums
def xyz2llh(xyz, datum=(0, 0)):
"""
XYZ2LLH calculates longitude, latitude, and height from global cartesisan coordinates.
LLH = xyz2llh(XYZ, DATUM) calculates longitude(deg), latitude(deg), and height(m) on the ellipsoid
specified by DATUM from the global cartestian coordinates given in the nx3(n=number of coordinate triples)
matrix XYZ.
DATUM can either be a vector the first two elements of which give da and df,
or it can be a string containing the name of a datum that is resolved
by the function DATUMS function.
Note that longitude occupies the first row of LLH.
See DATUMS for more information on datum parameters.
:param xyz: [x, y, z]
:type xyz: numpy array
:param datum: name of datum
:type datum: string
:returns: [lon, lat, height]
:rtype: numpy array
"""
# Check input arguments
if type(datum) == str:
datum = datums.get_datums(datum);
if any(np.isnan(datum)):
raise ValueError('Could not resolve datum name.');
da = datum[0];
df = datum[1];
if np.shape(xyz)[1] != 3:
raise TypeError('Input xyz MUST be nx3.');
# Set constants
a = 6378137 - da;
f = 1 / 298.2572235630 - df;
b = (1-f) * a;
e2 = 2 * f - np.square(f);
E2 = (np.square(a) - np.square(b)) / np.square(b);
# Calculate longitude, latitude, and height
llh = np.zeros(np.shape(xyz));
p = np.sqrt(np.square(xyz[:, 0]) + np.square(xyz[:, 1]));
llh[:, 0] = np.arctan2(xyz[:, 1], xyz[:, 0]);
theta = np.arctan(np.divide((xyz[:, 2] * a), (p * b)));
llh[:, 1] = np.arctan((xyz[:, 2] + E2 * b * np.power(np.sin(theta), 3)) /
(p - e2 * a * np.power(np.cos(theta), 3)) );
N = a / np.sqrt(1 - e2 * np.square(np.sin(llh[:, 1])));
llh[:, 2] = p / np.cos(llh[:, 1]) - N;
# Convert to degrees
llh[:, 0:2] = llh[:, 0:2]*57.295779513082323;
return llh;
def llh2xyz(llh, datum=(0, 0)):
"""
LLH2XYZ Calculates global cartesisan coordinates from longitude, latitude, and height.
XYZ=llh2xyz(llh,DATUM) calculates global cartestian coordinates
given the nx3 (n = number of coordinate triples) matrix LLH that contains
longitude (deg), latitude (deg), and height (m) on the ellipsoid
specified by DATUM. DATUM can either be a vector the first two elements
of which give da and df, or it can be a string containing the name of a
datum that is resolved by the function DATUMS function.
Note that longitude occupies the first row of LLH.
See DATUMS for more information on datum parameters.
:param llh: [lon, lat, height]
:type llh: numpy array
:param datum: name of datum
:type datum: string
"""
# Check input arguments
if type(datum) == str:
datum = datums.get_datums(datum);
if any(np.isnan(datum)):
raise ValueError('Could not resolve datum name.');
da = datum[0];
df = datum[1];
if np.shape(llh)[1] != 3:
raise TypeError('Input llh MUST be nx3.');
# Ellipsoid parameters
a = 6378137 - da;
f = 1 / 298.257223563 - df;
b = (1-f) * a;
# Convert degrees to radians
phi = llh[:, 1] * np.pi / 180; # lat
lam = llh[:, 0] * np.pi / 180; # lon
# Convert llh to xyz
XYZ = np.zeros(np.shape(llh));
N = np.square(a) / np.sqrt(np.square(a) * np.square(np.cos(phi)) + np.square(b) * np.square(np.sin(phi)));
XYZ[:, 0] = (N + llh[:, 2]) * np.cos(phi) * np.cos(lam);
XYZ[:, 1] = (N + llh[:, 2]) * np.cos(phi) * np.sin(lam);
XYZ[:, 2] = (np.square(b) * N / np.square(a) + llh[:, 2] ) * np.sin(phi);
return XYZ;
def xyz2enum(origin):
"""
XYZ2ENUM Returns a global to local transformation matrix.
T=xyz2enum(ORIGIN) Returns a transformation matrix that
tranforms coordinates in a global ECEF cartesian system
into to a local coordinate system aligned with the geographic
directions at the position ORIGIN. ORIGIN should contain a
longitude and a latitude pair (degrees). T is 3x3.
:param origin: [longitude, latitude]
:type origin: np array
"""
# Check input arguments
if len(origin) < 2:
raise ValueError('Input origin must have 2 elements, longitude and latitude (degrees).');
# Convert to radians and evaluate trigonometric functions
origin = np.multiply(origin, np.pi / 180);
s = np.sin(origin);
c = np.cos(origin);
# Make transformation matrix
T = np.array([[-s[0], c[0], 0],
[-s[1]*c[0], -s[1]*s[0], c[1]],
[c[1]*c[0], c[1]*s[0], s[1]]]);
return T;
def xyz2enu(d, origin, dcov=None):
"""
XYZ2ENU Transforms from global cartestian to local cartesian.
[E,ECOV]=xyz2enu(D,DCOV,ORIGIN) transforms data vector D and
data covariance DCOV from a global cartesian (XYZ) coordinate
system to a local coordinate system aligned with the geographic
directions at the position ORIGIN.
D should be either 3nx1 or 3xn (n = number of individual vectors).
DCOV should be 3nx3n.
ORIGIN should be a vector of length 2 or 3. If length 2, ORIGIN
is taken as a longitude, latitude pair (degrees); if length 3,
ORIGIN is taken as an XYZ station position. E is matrix (vector)
of transformed coordinates the same size as input D. ECOV is a
matrix containing the transformed covariance.
E=xyz2enu(D,ORIGIN) behaves as above without a data covariance matrix.
:param d: nx3 np array of x, y, z values
:type d: numpy array
:param origin: 1x3 np array (x, y, z) or 1x2 np.array (lon, lat)
:type origin: numpy array
:param dcov: 3x3 np array
:type dcov: numpy array
"""
# Check input arguments
if len(origin) > 2:
origin = np.reshape(origin, (1, 3));
origin = xyz2llh(origin);
origin = origin[0]; # 1x3 1D array, contains llh
# Make transformation matrix
Tm = xyz2enum(origin);
# Transform
e = np.dot(Tm, d.T);
if dcov is not None:
ecov = np.dot(np.dot(Tm, dcov), Tm.T);
else:
ecov = None;
return e.T, ecov;
def enu2xyz(d, origin, dcov=None):
"""
ENU2XYZ Transforms from global cartestian to local cartesian.
[E,ECOV]=xyz2enu(D,DCOV,ORIGIN) transforms data vector D and
data covariance DCOV from a local cartesian (ENU) coordinate
system aligned with the geographic directions at the position ORIGIN
to a global (XYZ) coordinate system.
D should be either 3nx1 or 3xn (n = number of individual vectors).
DCOV should be 3nx3n.
ORIGIN should be a vector of length 2 or 3. If length 2, ORIGIN
is taken as a longitude, latitude pair (degrees); if length 3,
ORIGIN is taken as an XYZ station position. E is matrix (vector)
of transformed coordinates the same size as input D. ECOV is a
matrix containing the transformed covariance.
E=xyz2enu(D,ORIGIN) behaves as above without a data covariance matrix.
:param d: nx3 np array of e, n, u values
:type d: numpy array
:param origin: 1x3 np array (x, y, z) or 1x2 np.array (lon, lat)
:type origin: numpy array
:param dcov: 3x3 np array
:type dcov: numpy array
"""
# Check input arguments
if len(origin) > 2:
origin = np.reshape(origin, (1, 3));
origin = xyz2llh(origin);
origin = origin[0]; # 1x3 1D array, contains llh
# Make transformation matrix
Tm = xyz2enum(origin);
Tminv = np.linalg.inv(Tm);
# Transform
e = np.dot(Tminv, d.T);
if dcov is not None:
ecov = np.dot(np.dot(Tminv, dcov), Tminv.T);
else:
ecov = None;
return e.T, ecov;
| [
"numpy.divide",
"numpy.multiply",
"numpy.arctan2",
"numpy.square",
"numpy.isnan",
"numpy.shape",
"numpy.sin",
"numpy.array",
"numpy.linalg.inv",
"numpy.cos",
"numpy.reshape",
"numpy.dot"
] | [((1649, 1681), 'numpy.arctan2', 'np.arctan2', (['xyz[:, 1]', 'xyz[:, 0]'], {}), '(xyz[:, 1], xyz[:, 0])\n', (1659, 1681), True, 'import numpy as np\n'), ((4555, 4587), 'numpy.multiply', 'np.multiply', (['origin', '(np.pi / 180)'], {}), '(origin, np.pi / 180)\n', (4566, 4587), True, 'import numpy as np\n'), ((4597, 4611), 'numpy.sin', 'np.sin', (['origin'], {}), '(origin)\n', (4603, 4611), True, 'import numpy as np\n'), ((4621, 4635), 'numpy.cos', 'np.cos', (['origin'], {}), '(origin)\n', (4627, 4635), True, 'import numpy as np\n'), ((4679, 4782), 'numpy.array', 'np.array', (['[[-s[0], c[0], 0], [-s[1] * c[0], -s[1] * s[0], c[1]], [c[1] * c[0], c[1] *\n s[0], s[1]]]'], {}), '([[-s[0], c[0], 0], [-s[1] * c[0], -s[1] * s[0], c[1]], [c[1] * c[0\n ], c[1] * s[0], s[1]]])\n', (4687, 4782), True, 'import numpy as np\n'), ((6215, 6230), 'numpy.dot', 'np.dot', (['Tm', 'd.T'], {}), '(Tm, d.T)\n', (6221, 6230), True, 'import numpy as np\n'), ((7743, 7760), 'numpy.linalg.inv', 'np.linalg.inv', (['Tm'], {}), '(Tm)\n', (7756, 7760), True, 'import numpy as np\n'), ((7787, 7805), 'numpy.dot', 'np.dot', (['Tminv', 'd.T'], {}), '(Tminv, d.T)\n', (7793, 7805), True, 'import numpy as np\n'), ((1418, 1430), 'numpy.square', 'np.square', (['f'], {}), '(f)\n', (1427, 1430), True, 'import numpy as np\n'), ((1473, 1485), 'numpy.square', 'np.square', (['b'], {}), '(b)\n', (1482, 1485), True, 'import numpy as np\n'), ((1555, 1568), 'numpy.shape', 'np.shape', (['xyz'], {}), '(xyz)\n', (1563, 1568), True, 'import numpy as np\n'), ((1705, 1736), 'numpy.divide', 'np.divide', (['(xyz[:, 2] * a)', '(p * b)'], {}), '(xyz[:, 2] * a, p * b)\n', (1714, 1736), True, 'import numpy as np\n'), ((3492, 3505), 'numpy.shape', 'np.shape', (['llh'], {}), '(llh)\n', (3500, 3505), True, 'import numpy as np\n'), ((3516, 3528), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (3525, 3528), True, 'import numpy as np\n'), ((3667, 3678), 'numpy.cos', 'np.cos', (['lam'], {}), '(lam)\n', (3673, 3678), True, 
'import numpy as np\n'), ((3728, 3739), 'numpy.sin', 'np.sin', (['lam'], {}), '(lam)\n', (3734, 3739), True, 'import numpy as np\n'), ((3806, 3817), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3812, 3817), True, 'import numpy as np\n'), ((6009, 6035), 'numpy.reshape', 'np.reshape', (['origin', '(1, 3)'], {}), '(origin, (1, 3))\n', (6019, 6035), True, 'import numpy as np\n'), ((7550, 7576), 'numpy.reshape', 'np.reshape', (['origin', '(1, 3)'], {}), '(origin, (1, 3))\n', (7560, 7576), True, 'import numpy as np\n'), ((1105, 1120), 'numpy.isnan', 'np.isnan', (['datum'], {}), '(datum)\n', (1113, 1120), True, 'import numpy as np\n'), ((1232, 1245), 'numpy.shape', 'np.shape', (['xyz'], {}), '(xyz)\n', (1240, 1245), True, 'import numpy as np\n'), ((1442, 1454), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (1451, 1454), True, 'import numpy as np\n'), ((1457, 1469), 'numpy.square', 'np.square', (['b'], {}), '(b)\n', (1466, 1469), True, 'import numpy as np\n'), ((1587, 1607), 'numpy.square', 'np.square', (['xyz[:, 0]'], {}), '(xyz[:, 0])\n', (1596, 1607), True, 'import numpy as np\n'), ((1610, 1630), 'numpy.square', 'np.square', (['xyz[:, 1]'], {}), '(xyz[:, 1])\n', (1619, 1630), True, 'import numpy as np\n'), ((1972, 1989), 'numpy.cos', 'np.cos', (['llh[:, 1]'], {}), '(llh[:, 1])\n', (1978, 1989), True, 'import numpy as np\n'), ((3025, 3040), 'numpy.isnan', 'np.isnan', (['datum'], {}), '(datum)\n', (3033, 3040), True, 'import numpy as np\n'), ((3152, 3165), 'numpy.shape', 'np.shape', (['llh'], {}), '(llh)\n', (3160, 3165), True, 'import numpy as np\n'), ((3653, 3664), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (3659, 3664), True, 'import numpy as np\n'), ((3714, 3725), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (3720, 3725), True, 'import numpy as np\n'), ((6279, 6295), 'numpy.dot', 'np.dot', (['Tm', 'dcov'], {}), '(Tm, dcov)\n', (6285, 6295), True, 'import numpy as np\n'), ((7854, 7873), 'numpy.dot', 'np.dot', (['Tminv', 'dcov'], {}), '(Tminv, 
dcov)\n', (7860, 7873), True, 'import numpy as np\n'), ((3777, 3789), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (3786, 3789), True, 'import numpy as np\n'), ((3539, 3551), 'numpy.square', 'np.square', (['a'], {}), '(a)\n', (3548, 3551), True, 'import numpy as np\n'), ((3579, 3591), 'numpy.square', 'np.square', (['b'], {}), '(b)\n', (3588, 3591), True, 'import numpy as np\n'), ((3758, 3770), 'numpy.square', 'np.square', (['b'], {}), '(b)\n', (3767, 3770), True, 'import numpy as np\n'), ((1800, 1813), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1806, 1813), True, 'import numpy as np\n'), ((1870, 1883), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (1876, 1883), True, 'import numpy as np\n'), ((1931, 1948), 'numpy.sin', 'np.sin', (['llh[:, 1]'], {}), '(llh[:, 1])\n', (1937, 1948), True, 'import numpy as np\n'), ((3564, 3575), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (3570, 3575), True, 'import numpy as np\n'), ((3604, 3615), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3610, 3615), True, 'import numpy as np\n')] |
import torch
import numpy
from deep_signature.nn.datasets import DeepSignatureTupletsDataset
from deep_signature.nn.datasets import DeepSignatureEuclideanCurvatureTupletsOnlineDataset
from deep_signature.nn.datasets import DeepSignatureEquiaffineCurvatureTupletsOnlineDataset
from deep_signature.nn.datasets import DeepSignatureAffineCurvatureTupletsOnlineDataset
from deep_signature.nn.networks import DeepSignatureCurvatureNet
from deep_signature.nn.losses import CurvatureLoss
from deep_signature.nn.losses import CurvatureLoss
from deep_signature.nn.trainers import ModelTrainer
from common import settings
from common import utils as common_utils
from argparse import ArgumentParser
if __name__ == '__main__':
torch.set_default_dtype(torch.float64)
parser = ArgumentParser()
parser.add_argument("--group", dest="group")
parser.add_argument("--epochs", dest="epochs", default=settings.curvature_default_epochs, type=int)
parser.add_argument("--continue_training", dest="continue_training", default=settings.curvature_default_continue_training, type=bool)
parser.add_argument("--train_buffer_size", dest="train_buffer_size", default=settings.curvature_default_train_buffer_size, type=int)
parser.add_argument("--validation_buffer_size", dest="validation_buffer_size", default=settings.curvature_default_validation_buffer_size, type=int)
parser.add_argument("--train_batch_size", dest="train_batch_size", default=settings.curvature_default_train_batch_size, type=int)
parser.add_argument("--validation_batch_size", dest="validation_batch_size", default=settings.curvature_default_validation_batch_size, type=int)
parser.add_argument("--train_dataset_size", dest="train_dataset_size", default=settings.curvature_default_train_dataset_size, type=int)
parser.add_argument("--validation_dataset_size", dest="validation_dataset_size", default=settings.curvature_default_validation_dataset_size, type=int)
parser.add_argument("--learning_rate", dest="learning_rate", default=settings.curvature_default_learning_rate, type=float)
parser.add_argument("--validation_split", dest="validation_split", default=settings.curvature_default_validation_split, type=float)
parser.add_argument("--sampling_ratio", dest="sampling_ratio", default=settings.curvature_default_sampling_ratio, type=float)
parser.add_argument("--supporting_points_count", dest="supporting_points_count", default=settings.curvature_default_supporting_points_count, type=int)
parser.add_argument("--sample_points_count", dest="sample_points_count", default=settings.curvature_default_sample_points_count, type=int)
parser.add_argument("--multimodality", dest="multimodality", default=settings.curvature_default_multimodality, type=int)
parser.add_argument("--offset_length", dest="offset_length", default=settings.curvature_default_offset_length, type=int)
parser.add_argument("--num_workers_train", dest="num_workers_train", default=settings.curvature_default_num_workers_train, type=int)
parser.add_argument("--num_workers_validation", dest="num_workers_validation", default=settings.curvature_default_num_workers_validation, type=int)
parser.add_argument("--negative_examples_count", dest="negative_examples_count", default=settings.curvature_default_negative_examples_count, type=int)
parser.add_argument("--history_size", dest="history_size", default=settings.curvature_default_history_size, type=int)
args = parser.parse_args()
OnlineDataset = None
results_base_dir_path = None
if args.group == 'euclidean':
OnlineDataset = DeepSignatureEuclideanCurvatureTupletsOnlineDataset
results_base_dir_path = settings.level_curves_euclidean_curvature_tuplets_results_dir_path
elif args.group == 'equiaffine':
OnlineDataset = DeepSignatureEquiaffineCurvatureTupletsOnlineDataset
results_base_dir_path = settings.level_curves_equiaffine_curvature_tuplets_results_dir_path
elif args.group == 'affine':
OnlineDataset = DeepSignatureAffineCurvatureTupletsOnlineDataset
results_base_dir_path = settings.level_curves_affine_curvature_tuplets_results_dir_path
train_dataset = OnlineDataset(
dataset_size=args.train_dataset_size,
dir_path=settings.level_curves_dir_path_train,
sampling_ratio=args.sampling_ratio,
multimodality=args.multimodality,
replace=True,
buffer_size=args.train_buffer_size,
num_workers=args.num_workers_train,
supporting_points_count=args.supporting_points_count,
offset_length=args.offset_length,
negative_examples_count=args.negative_examples_count)
validation_dataset = OnlineDataset(
dataset_size=args.validation_dataset_size,
dir_path=settings.level_curves_dir_path_validation,
sampling_ratio=args.sampling_ratio,
multimodality=args.multimodality,
replace=False,
buffer_size=args.validation_buffer_size,
num_workers=args.num_workers_validation,
supporting_points_count=args.supporting_points_count,
offset_length=args.offset_length,
negative_examples_count=args.negative_examples_count)
validation_dataset.start()
validation_dataset.stop()
train_dataset.start()
model = DeepSignatureCurvatureNet(sample_points=args.sample_points_count).cuda()
print(model)
if args.continue_training:
latest_subdir = common_utils.get_latest_subdirectory(results_base_dir_path)
results = numpy.load(f"{latest_subdir}/results.npy", allow_pickle=True).item()
model.load_state_dict(torch.load(results['model_file_path'], map_location=torch.device('cuda')))
optimizer = torch.optim.LBFGS(model.parameters(), lr=args.learning_rate, line_search_fn='strong_wolfe', history_size=args.history_size)
curvature_loss_fn = CurvatureLoss()
model_trainer = ModelTrainer(model=model, loss_functions=[curvature_loss_fn], optimizer=optimizer)
model_trainer.fit(
train_dataset=train_dataset,
validation_dataset=validation_dataset,
epochs=args.epochs,
train_batch_size=args.train_batch_size,
validation_batch_size=args.validation_batch_size,
validation_split=args.validation_split,
results_base_dir_path=results_base_dir_path)
| [
"deep_signature.nn.losses.CurvatureLoss",
"numpy.load",
"argparse.ArgumentParser",
"deep_signature.nn.networks.DeepSignatureCurvatureNet",
"torch.set_default_dtype",
"deep_signature.nn.trainers.ModelTrainer",
"torch.device",
"common.utils.get_latest_subdirectory"
] | [((721, 759), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (744, 759), False, 'import torch\n'), ((774, 790), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (788, 790), False, 'from argparse import ArgumentParser\n'), ((5859, 5874), 'deep_signature.nn.losses.CurvatureLoss', 'CurvatureLoss', ([], {}), '()\n', (5872, 5874), False, 'from deep_signature.nn.losses import CurvatureLoss\n'), ((5895, 5982), 'deep_signature.nn.trainers.ModelTrainer', 'ModelTrainer', ([], {'model': 'model', 'loss_functions': '[curvature_loss_fn]', 'optimizer': 'optimizer'}), '(model=model, loss_functions=[curvature_loss_fn], optimizer=\n optimizer)\n', (5907, 5982), False, 'from deep_signature.nn.trainers import ModelTrainer\n'), ((5442, 5501), 'common.utils.get_latest_subdirectory', 'common_utils.get_latest_subdirectory', (['results_base_dir_path'], {}), '(results_base_dir_path)\n', (5478, 5501), True, 'from common import utils as common_utils\n'), ((5296, 5361), 'deep_signature.nn.networks.DeepSignatureCurvatureNet', 'DeepSignatureCurvatureNet', ([], {'sample_points': 'args.sample_points_count'}), '(sample_points=args.sample_points_count)\n', (5321, 5361), False, 'from deep_signature.nn.networks import DeepSignatureCurvatureNet\n'), ((5520, 5581), 'numpy.load', 'numpy.load', (['f"""{latest_subdir}/results.npy"""'], {'allow_pickle': '(True)'}), "(f'{latest_subdir}/results.npy', allow_pickle=True)\n", (5530, 5581), False, 'import numpy\n'), ((5671, 5691), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5683, 5691), False, 'import torch\n')] |
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import unittest_support as unittest
from numba import roc, intp
WAVESIZE = 64
@roc.jit(device=True)
def wave_reduce(val):
tid = roc.get_local_id(0)
laneid = tid % WAVESIZE
width = WAVESIZE // 2
while width:
if laneid < width:
val[laneid] += val[laneid + width]
val[laneid + width] = -1 # debug
roc.wavebarrier()
width = width // 2
# First thread has the result
roc.wavebarrier()
return val[0]
@roc.jit
def kernel_warp_reduce(inp, out):
idx = roc.get_group_id(0)
val = inp[idx]
out[idx] = wave_reduce(val)
@roc.jit
def kernel_flat_reduce(inp, out):
out[0] = wave_reduce(inp)
class TestReduction(unittest.TestCase):
def template_wave_reduce_int(self, dtype):
numblk = 2
inp = np.arange(numblk * WAVESIZE, dtype=dtype).reshape(numblk, WAVESIZE)
inp_cpy = np.copy(inp)
out = np.zeros((numblk,))
kernel_warp_reduce[numblk, WAVESIZE](inp, out)
np.testing.assert_equal(out, inp_cpy.sum(axis=1))
def test_wave_reduce_intp(self):
self.template_wave_reduce_int(np.intp)
def test_wave_reduce_int32(self):
self.template_wave_reduce_int(np.int32)
def template_wave_reduce_real(self, dtype):
numblk = 2
inp = np.linspace(0, 1, numblk * WAVESIZE).astype(dtype)
inp = inp.reshape(numblk, WAVESIZE)
inp_cpy = np.copy(inp)
out = np.zeros((numblk,))
kernel_warp_reduce[numblk, WAVESIZE](inp, out)
np.testing.assert_allclose(out, inp_cpy.sum(axis=1))
def test_wave_reduce_float64(self):
self.template_wave_reduce_real(np.float64)
def test_wave_reduce_float32(self):
self.template_wave_reduce_real(np.float32)
def test_flat_reduce(self):
inp = np.arange(WAVESIZE) # destroyed in kernel
out = np.zeros((1,))
kernel_flat_reduce[1, WAVESIZE](inp, out)
np.testing.assert_allclose(out[0], np.arange(WAVESIZE).sum())
if __name__ == '__main__':
unittest.main()
| [
"numba.unittest_support.main",
"numba.roc.get_local_id",
"numpy.copy",
"numba.roc.jit",
"numpy.zeros",
"numpy.arange",
"numpy.linspace",
"numba.roc.wavebarrier",
"numba.roc.get_group_id"
] | [((178, 198), 'numba.roc.jit', 'roc.jit', ([], {'device': '(True)'}), '(device=True)\n', (185, 198), False, 'from numba import roc, intp\n'), ((231, 250), 'numba.roc.get_local_id', 'roc.get_local_id', (['(0)'], {}), '(0)\n', (247, 250), False, 'from numba import roc, intp\n'), ((534, 551), 'numba.roc.wavebarrier', 'roc.wavebarrier', ([], {}), '()\n', (549, 551), False, 'from numba import roc, intp\n'), ((624, 643), 'numba.roc.get_group_id', 'roc.get_group_id', (['(0)'], {}), '(0)\n', (640, 643), False, 'from numba import roc, intp\n'), ((2124, 2139), 'numba.unittest_support.main', 'unittest.main', ([], {}), '()\n', (2137, 2139), True, 'from numba import unittest_support as unittest\n'), ((450, 467), 'numba.roc.wavebarrier', 'roc.wavebarrier', ([], {}), '()\n', (465, 467), False, 'from numba import roc, intp\n'), ((977, 989), 'numpy.copy', 'np.copy', (['inp'], {}), '(inp)\n', (984, 989), True, 'import numpy as np\n'), ((1004, 1023), 'numpy.zeros', 'np.zeros', (['(numblk,)'], {}), '((numblk,))\n', (1012, 1023), True, 'import numpy as np\n'), ((1505, 1517), 'numpy.copy', 'np.copy', (['inp'], {}), '(inp)\n', (1512, 1517), True, 'import numpy as np\n'), ((1532, 1551), 'numpy.zeros', 'np.zeros', (['(numblk,)'], {}), '((numblk,))\n', (1540, 1551), True, 'import numpy as np\n'), ((1900, 1919), 'numpy.arange', 'np.arange', (['WAVESIZE'], {}), '(WAVESIZE)\n', (1909, 1919), True, 'import numpy as np\n'), ((1956, 1970), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (1964, 1970), True, 'import numpy as np\n'), ((891, 932), 'numpy.arange', 'np.arange', (['(numblk * WAVESIZE)'], {'dtype': 'dtype'}), '(numblk * WAVESIZE, dtype=dtype)\n', (900, 932), True, 'import numpy as np\n'), ((1392, 1428), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(numblk * WAVESIZE)'], {}), '(0, 1, numblk * WAVESIZE)\n', (1403, 1428), True, 'import numpy as np\n'), ((2064, 2083), 'numpy.arange', 'np.arange', (['WAVESIZE'], {}), '(WAVESIZE)\n', (2073, 2083), True, 'import numpy as np\n')] 
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestNormalizeOp(hu.HypothesisTestCase):
    """Checks the Normalize (L2) and NormalizeL1 operators against numpy
    reference implementations over randomly generated tensors."""

    @given(X=hu.tensor(min_dim=1,
                       max_dim=5,
                       elements=st.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs)
    def test_normalize(self, X, gc, dc):
        def l2_reference(X, axis):
            # Add the smallest positive float so an all-zero slice never
            # divides by zero.
            denom = np.sqrt((X ** 2).sum(axis=axis, keepdims=True)) + np.finfo(X.dtype).tiny
            return (X / denom,)

        for axis in range(-X.ndim, X.ndim):
            op = core.CreateOperator("Normalize", "X", "Y", axis=axis)
            self.assertReferenceChecks(
                gc,
                op,
                [X],
                functools.partial(l2_reference, axis=axis))
            self.assertDeviceChecks(dc, op, [X], [0])
            self.assertGradientChecks(gc, op, [X], 0, [0])

    @given(X=hu.tensor(min_dim=1,
                       max_dim=5,
                       elements=st.floats(min_value=0.5, max_value=1.0)),
           **hu.gcs)
    def test_normalize_L1(self, X, gc, dc):
        def l1_reference(X, axis):
            # Divide by the sum of absolute values along `axis`.
            return (X / abs(X).sum(axis=axis, keepdims=True),)

        for axis in range(-X.ndim, X.ndim):
            print('axis: ', axis)
            op = core.CreateOperator("NormalizeL1", "X", "Y", axis=axis)
            self.assertReferenceChecks(
                gc,
                op,
                [X],
                functools.partial(l1_reference, axis=axis))
            self.assertDeviceChecks(dc, op, [X], [0])
| [
"functools.partial",
"caffe2.python.core.CreateOperator",
"numpy.finfo",
"hypothesis.strategies.floats"
] | [((1460, 1513), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Normalize"""', '"""X"""', '"""Y"""'], {'axis': 'axis'}), "('Normalize', 'X', 'Y', axis=axis)\n", (1479, 1513), False, 'from caffe2.python import core\n'), ((2206, 2261), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""NormalizeL1"""', '"""X"""', '"""Y"""'], {'axis': 'axis'}), "('NormalizeL1', 'X', 'Y', axis=axis)\n", (2225, 2261), False, 'from caffe2.python import core\n'), ((1631, 1674), 'functools.partial', 'functools.partial', (['ref_normalize'], {'axis': 'axis'}), '(ref_normalize, axis=axis)\n', (1648, 1674), False, 'import functools\n'), ((2379, 2412), 'functools.partial', 'functools.partial', (['ref'], {'axis': 'axis'}), '(ref, axis=axis)\n', (2396, 2412), False, 'import functools\n'), ((1110, 1149), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.5)', 'max_value': '(1.0)'}), '(min_value=0.5, max_value=1.0)\n', (1119, 1149), True, 'import hypothesis.strategies as st\n'), ((1890, 1929), 'hypothesis.strategies.floats', 'st.floats', ([], {'min_value': '(0.5)', 'max_value': '(1.0)'}), '(min_value=0.5, max_value=1.0)\n', (1899, 1929), True, 'import hypothesis.strategies as st\n'), ((1343, 1360), 'numpy.finfo', 'np.finfo', (['X.dtype'], {}), '(X.dtype)\n', (1351, 1360), True, 'import numpy as np\n')] |
"""
Author:
<NAME> <<EMAIL>>
Usage:
make_bb_dist_mats.py dmap [options]
Options:
-p, --pdb <pdb> The PDB file.
-c, --chains <chain-ids> Comma-separated list of chain identifiers
(defaults to the first chain).
-o, --output <file> Save the plot to a file. The file format is
determined by the file extension.
-m, --measure <measure> The inter-residue distance measure [default: CA].
-M, --mask-thresh <dist> Hide the distances below a given threshold (in
angstroms).
--plaintext Generate a plaintext distance/contact matrix
and write to stdout (recommended for
piping into other CLI programs).
--asymmetric Display the plot only in the upper-triangle.
--title TITLE The title of the plot (optional).
--xlabel <label> X-axis label [default: Coordinating Residue and its neighbors].
--ylabel <label> Y-axis label [default: Coordinating Residue and its neighbors].
--font-family <font> Font family (via matplotlib) [default: sans].
--font-size <size> Font size in points [default: 10].
--width-inches <width> Width of the plot in inches [default: 6.0].
--height-inches <height> Height of the plot in inches [default: 6.0].
--dpi <dpi> Set the plot DPI [default: 80]
--greyscale Generate a greyscale distance map.
--no-colorbar Hide the color bar on distance maps.
--transparent Set the background to transparent.
--show-frame
-v, --verbose Verbose mode.
Notes:
This script imports Lei Lu's ligand_database.py
(from https://github.com/lonelu/Metalprot/blob/2bbb51ede955dfdb744c10f73ccdf674416c453e/metalprot/apps/ligand_database.py),
but works with an edited version of the script (edited by <NAME>)
Sticking the options from pconpy.py down here in case docopt needs them for plot_dist_mat() (which is
essentially code copied from pconpy.py)
Currently (7.1.21), this code uses ligand_database.py (written by Lei Lu) to get the metal core. Then it calculates a
distance matrix for all the backbone atoms in these residues. Then it uses the plotting functions from pconpy.py
to generate and save distance maps. It also writes out .txt files containing the distance matrices and
information about the coordinating residues and their neighbors (that went into the distance matrix).
"""
# ligand_database.py must be in same dir as this script (doesn't need __init__.py?)
import prody as pr
import os
import numpy
import matplotlib as mpl
import pylab
from itertools import combinations, combinations_with_replacement
from docopt import docopt
import itertools
import pickle
import random
import sys
from . import ligand_database as ld
def get_bb_atoms(path):
    """Collect the backbone atoms (N, CA, C, CB) of the metal-coordinating
    residues and their immediate sequence neighbors.

    Params:
        path: Path to the metal-core pdb file. The file name encodes the
            coordinating positions in its '_'-separated fields after the
            third one; only their count is used here.
    Returns:
        bb_atoms: Tuple of four lists (n_atoms, ca_atoms, c_atoms, cb_atoms)
            of prody Atom() objects. GLY has no CB, so a renamed copy of its
            CA atom is used as the CB stand-in.
        The string 'Failed to parse PDB' when prody cannot read the file.
    """
    try:
        pdb_prody = pr.parsePDB(path)
    except Exception:  # narrowed from a bare `except:`
        print('Failed to Parse PDB:')
        print(path)
        return 'Failed to parse PDB'
    cas = pdb_prody.select('name CA')
    resindices = cas.getResindices()
    print('Resindices are:')
    print(resindices)
    # The second and the second-to-last residues are always treated as
    # coordinating.
    coordresindices=[]
    for atom in cas:
        if atom.getResindex() == 1 or atom.getResindex() == len(cas)-2:
            coordresindices.append(atom.getResindex())
    # Interior residues of a metal-binding type are candidate coordinators.
    temp_coordresindices = []
    for atom in cas:
        if atom.getResindex() not in [0, 1, len(cas)-2, len(cas)-1] and atom.getResname() in ['HIS', 'ASP', 'GLU', 'CYS']:
            temp_coordresindices.append(atom.getResindex())
    pdb = path.split('/')[-1]
    # BUG FIX: str.strip('.pdb') strips any leading/trailing '.', 'p', 'd',
    # or 'b' characters, not the '.pdb' suffix; use splitext instead.
    positions = os.path.splitext(pdb)[0].split('_')[3:]
    num_positions = len(positions)
    # This isn't a flawless way to extract the coordinating residues, but
    # hopefully it's good enough for most examples.
    # NOTE(review): if no interior candidate has both neighbors present in
    # `resindices`, this loop never terminates — confirm inputs guarantee one.
    while len(coordresindices) < num_positions:
        for idx in temp_coordresindices:
            if idx-1 in resindices and idx+1 in resindices and idx not in coordresindices:
                coordresindices.append(idx)
                break
    # Expand every coordinating residue to its (i-1, i, i+1) window.
    full_sel_indices = []
    for idx in coordresindices:
        p = int(idx)
        full_sel_indices.append(p-1)
        full_sel_indices.append(p)
        full_sel_indices.append(p+1)
    print('Full Sel Indices are:')
    print(full_sel_indices)
    full_sel = 'resindex '
    for num in full_sel_indices:
        full_sel = full_sel + str(num) + ' '
    # BUG FIX: strip() returns a new string; the original discarded it.
    full_sel = full_sel.strip(' ')
    print('Full Sel is:')
    print(full_sel)
    all_atoms = pdb_prody.select(full_sel)
    print(all_atoms)
    n_resinds, ca_resinds, c_resinds, cb_resinds = [], [], [], []
    n_atoms, ca_atoms, c_atoms, cb_atoms = [], [], [], []
    # Iterate through the indices in case the same residue needs to be included twice.
    for idx in full_sel_indices:
        for atom in all_atoms:
            if atom.getResindex() == idx:
                if atom.getName() == 'N':
                    n_atoms.append(atom)
                    n_resinds.append(atom.getResindex())
                elif atom.getName() == 'CA':
                    ca_atoms.append(atom)
                    ca_resinds.append(atom.getResindex())
                    if atom.getResname() == 'GLY':
                        # GLY has no CB: substitute a copy of CA renamed 'CB'.
                        # copy() returns an AtomGroup, which is indexed to get an Atom.
                        new_atom = atom.copy()[0]
                        new_atom.setName('CB')
                        cb_atoms.append(new_atom)
                        cb_resinds.append(atom.getResindex())
                elif atom.getName() == 'C':
                    c_atoms.append(atom)
                    c_resinds.append(atom.getResindex())
                elif atom.getName() == 'CB':
                    cb_atoms.append(atom)
                    cb_resinds.append(atom.getResindex())
    bb_atoms = (n_atoms, ca_atoms, c_atoms, cb_atoms)
    return bb_atoms
def calc_dist_matrix(atoms, dist_thresh=None,
        mask_thresh=None, asymmetric=False):
    """Build a fixed-size 12x12 backbone distance matrix for `atoms`.

    Adapted from pconpy.py. The matrix size is always 12x12 (up to 4
    coordinating residues x 3-residue windows) regardless of len(atoms);
    unused cells stay 0.

    Args:
        atoms: A list of 'Prody.Atom()' objects (or 'BLANK_ATOM' pads).
        dist_thresh: optional float; when given, the result becomes a
            boolean contact matrix (distance < threshold).
        mask_thresh: optional float; when truthy, distances >= the value
            are masked out of the returned array.
        asymmetric: when True, only the upper triangle of the underlying
            matrix is filled before the final transpose.

    Returns:
        The distance matrix, as a numpy (possibly masked) array.
    """
    grid = numpy.zeros((12, 12), dtype="float64")
    # Fill the upper triangle (and mirror it unless asymmetric).
    for row, col in combinations_with_replacement(range(len(atoms)), 2):
        separation = calc_distance(atoms[row], atoms[col])
        grid[row, col] = separation
        if not asymmetric:
            grid[col, row] = separation
    # Transpose so the filled distances sit in the upper triangle.
    grid = grid.T
    if dist_thresh is not None:
        grid = grid < dist_thresh
    if mask_thresh:
        grid = numpy.ma.masked_greater_equal(grid, mask_thresh)
    return grid
def calc_distance(atom_a, atom_b):
    """Return the Euclidean distance between two prody Atom() objects.

    'BLANK_ATOM' placeholder entries (used to pad short atom lists) are
    treated as being at distance 0.
    """
    if 'BLANK_ATOM' in atom_a or 'BLANK_ATOM' in atom_b:
        return 0
    return numpy.linalg.norm(atom_a.getCoords() - atom_b.getCoords())
#
# pconpy.py Plotting Functions
#
def px2pt(p):
    """Convert a length in pixels to points (96 px/inch, 72 pt/inch)."""
    points_per_inch = 72.
    pixels_per_inch = 96.
    return p * points_per_inch / pixels_per_inch
def init_spines(hidden=[]):
    """Initialise the plot frame, hiding the selected spines.

    Args:
        hidden: A list of spine names to hide. For example, pass
            ["top", "right"] to hide both the top and right axes borders.
            With the default empty list, every spine is shown.

    Returns:
        ``None``.
    """
    axes = pylab.gca()
    for name in ["top", "bottom", "right", "left", "polar"]:
        if name in hidden:
            axes.spines[name].set_visible(False)
        else:
            # Not every projection defines every spine (e.g. "polar" on a
            # rectangular axes): skip the ones that are missing.
            try:
                spine = axes.spines[name]
            except KeyError:
                continue
            spine.set_visible(True)
            spine.set_linewidth(px2pt(0.75))
    return
def init_pylab(font_kwargs=None):
    """Initialise and clean up the look and feel of the plotting area.

    Args:
        font_kwargs: Optional dict of matplotlib font properties (e.g.
            {"family": "sans", "size": 10.0}). The "size" entry is used for
            the legend font size and falls back to 10.0 (the --font-size
            CLI default) when absent.

    Returns:
        ``None``.
    """
    # BUG FIX: the original signature used a mutable default ({}) and then
    # read font_kwargs["size"] unconditionally, so calling init_pylab()
    # without arguments raised KeyError.
    if font_kwargs is None:
        font_kwargs = {}
    mpl.rc("lines", linewidth=px2pt(1))
    mpl.rc("xtick", **{"direction" : "out" })
    mpl.rc("ytick", **{"direction" : "out" })
    mpl.rc("legend", frameon=False, fontsize=font_kwargs.get("size", 10.0), numpoints=1)
    mpl.rc("font", **font_kwargs)
    pylab.tick_params(axis="x", which="both", top="off")
    pylab.tick_params(axis="y", which="both", right="off")
    init_spines()
    return
#
# End pconpy.py Plotting Functions
#
def get_dist_mat(opts, bb_atoms):
    """Validate the relevant CLI options and build the distance matrix.

    Args:
        opts: docopt options dict; uses --mask-thresh (optional numeric
            string) and --chains (comma-separated chain identifiers).
        bb_atoms: list of prody Atom() objects (or 'BLANK_ATOM' pads) of a
            single backbone atom type.

    Returns:
        The (possibly masked) 12x12 distance matrix from calc_dist_matrix.
    """
    if opts["--mask-thresh"]:
        opts["--mask-thresh"] = float(opts["--mask-thresh"])
    if opts["--chains"]:
        chain_ids = opts["--chains"].upper().split(",")
        # Check that pdb chain ids are alphanumeric (see:
        # http://deposit.rcsb.org/adit/).
        # BUG FIX: the original `numpy.all(map(str.isalnum, chain_ids))` is
        # always truthy on Python 3 (numpy wraps the map object in a 0-d
        # object array), and `sys.stderr.write()` was called with no
        # argument, which raises TypeError when reached. Use the builtin
        # all() and actually write a message.
        if not all(chain_id.isalnum() for chain_id in chain_ids):
            sys.stderr.write("ERROR: chain identifiers must be alphanumeric\n")
    # Only the distance-map ("dmap") path is supported here, so go straight
    # to the matrix computation.
    if opts['--mask-thresh']:
        mat = calc_dist_matrix(bb_atoms, mask_thresh=opts['--mask-thresh'])
    else:
        mat = calc_dist_matrix(bb_atoms)
    return mat
def plot_dist_mat(opts, mat, pdb, mat_type, out_folder, test_perm_mat=False):
    """
    This is just the 'if name=main' block from pconpy.py. Now split into
    mult functions (get_dist_mat, plot_dist_mat)

    Renders `mat` either as plain text (--plaintext) or as a pcolormesh
    distance map saved as a PNG under `out_folder`.

    Args:
        opts: docopt options dict (font/figure/dpi/colorbar settings).
        mat: distance matrix; presumably a numpy masked array, since the
            plaintext branch calls mat.filled(0) -- TODO confirm.
        pdb: pdb file name; its stem is used in the output file name.
        mat_type: backbone-atom-type label used in the axis labels and the
            output file name.
        out_folder: prefix prepended to the output file name.
        test_perm_mat: when True, tag the output file name with
            '_test_perm_mat'.
    """
    if opts["--plaintext"]:
        # NOTE(review): this script's docopt usage defines no "cmap"/"hbmap"
        # keys, so this branch looks like it raises KeyError -- confirm
        # before relying on --plaintext.
        if opts["cmap"] or opts["hbmap"]:
            fmt = "%d"
        else:
            fmt = "%.3f"
        numpy.savetxt(opts["--output"], mat.filled(0), fmt=fmt)
    else:
        font_kwargs = {
                "family" : opts["--font-family"],
                "size" : float(opts["--font-size"]) }
        init_pylab(font_kwargs)
        # hide all the spines i.e. no axes are drawn
        init_spines(hidden=["top", "bottom", "left", "right"])
        pylab.gcf().set_figwidth(float(opts["--width-inches"]))
        pylab.gcf().set_figheight(float(opts["--height-inches"]))
        ### Changed to 12 for const. size dist maps
        pylab.xlim(0, 12) #len(bb_atoms))
        pylab.ylim(0, 12) #len(bb_atoms))
        pylab.xlabel(mat_type + ' for Coordinating Residues and their neighbors') # opts["--xlabel"])
        pylab.ylabel(mat_type + ' for Coordinating Residues and their neighbors') # opts["--ylabel"])
        ax, fig = pylab.gca(), pylab.gcf()
        if opts["--show-frame"]:
            init_spines(hidden=[])
        ### Commenting this out because I only accept dmap as a option at the moment
        # if opts["cmap"] or opts["hbmap"]:
        #     map_obj = pylab.pcolormesh(mat,
        #             shading="flat", edgecolors="None", cmap=mpl.cm.Greys)
        if opts["dmap"]:
            if opts["--greyscale"]:
                cmap = mpl.cm.Greys
            else:
                cmap = mpl.cm.jet_r
            map_obj = pylab.pcolormesh(mat, shading="flat",
                    edgecolors="None", cmap=cmap)
            if not opts["--no-colorbar"]:
                # draw the colour bar in its own axes to the right of the map
                box = ax.get_position()
                pad, width = 0.02, 0.02
                cax = fig.add_axes([box.xmax + pad, box.ymin, width, box.height])
                cbar = pylab.colorbar(map_obj, drawedges=False, cax=cax)
                cbar.outline.set_visible(False)
                pylab.ylabel("Distance (angstroms)")
        else:
            # only the dmap plot kind is currently supported
            raise NotImplementedError
        if opts["--title"] is not None:
            ax.set_title(opts["--title"], fontweight="bold")
    if test_perm_mat:
        out_file = out_folder + pdb.split('.')[0] + '_dmap_' + mat_type + '_test_perm_mat.png'
    else:
        out_file = out_folder + pdb.split('.')[0] + '_dmap_' + mat_type + '.png'
    pylab.savefig(out_file, bbox_inches="tight",
            dpi=int(opts["--dpi"]), transparent=opts["--transparent"])
def write_res_info_file(bb_atoms, pdb, mat_type, out_folder):
    """Compute the path of the residue-info file for `pdb`.

    NOTE: the actual file writing (residue/atom details for the atoms that
    went into the distance matrix) is currently disabled — it depended on a
    metal_cores structure that is no longer passed in — so this function
    only builds the output path and returns None.
    """
    stem = pdb.split('.')[0]
    info_path = out_folder + stem + '_atom_info_' + mat_type + '.txt'
def write_dist_mat_file(mat, pdb, mat_type, out_folder, full_mat=False):
    """Write the rows of `mat` to a plain-text file.

    The output path is out_folder + <pdb stem> + ('_full_mat_' if full_mat
    else '_dist_mat_') + mat_type + '.txt'. For mat_type == 'all_channels'
    each channel is written row by row, followed by an 'end channel' marker.
    """
    # Print full arrays (no '...' truncation) while writing.
    numpy.set_printoptions(threshold=numpy.inf)
    stem = pdb.split('.')[0]
    suffix = '_full_mat_' if full_mat else '_dist_mat_'
    out_path = out_folder + stem + suffix + mat_type + '.txt'
    with open(out_path, 'w') as handle:
        if mat_type == 'all_channels':
            for channel in mat:
                for row in channel:
                    handle.write(str(row) + '\n')
                handle.write('end channel\n')
        else:
            for row in mat:
                handle.write(str(row) + '\n')
    # Restore the default numpy print threshold.
    numpy.set_printoptions(threshold=1000)
def write_pickle_file(full_mat, pdb, mat_type, out_folder):
    """Pickle `full_mat` into the current permutation's folder.

    Currently used only to save the full (44-channel) matrix. The output
    path is out_folder + <pdb stem> + '_full_mat_' + mat_type + '.pkl'.
    """
    numpy.set_printoptions(threshold=numpy.inf)
    stem = pdb.split('.')[0]
    out_path = out_folder + stem + '_full_mat_' + mat_type + '.pkl'
    with open(out_path, 'wb') as handle:
        pickle.dump(full_mat, handle)
def make_permutations_of_bb_atoms(bb_atoms):
    """Generate all 24 orderings of the four 3-residue windows in bb_atoms.

    Args:
        bb_atoms: tuple/list of per-atom-type lists (N, CA, C, CB). Each
            list holds 12 entries (4 coordinating residues x 3-residue
            windows); 9-entry lists (3 coordinating residues) are padded
            IN PLACE with three 'BLANK_ATOM' placeholders.

    Returns:
        A list of 24 entries, one per permutation of the 4 windows; each
        entry mirrors the structure of bb_atoms with every atom list
        reordered window-by-window.

    Note:
        The original version printed large debug dumps of every window on
        every permutation; those prints have been removed.
    """
    window = 3  # residues per coordinating-residue window
    perms = list(itertools.permutations([1, 2, 3, 4]))
    permuted_bb_atoms = []
    for perm in perms:
        new_bb_atoms = []
        for atom_list in bb_atoms:
            # Pad 3-coordinating-residue cores up to 4 windows. This
            # mutates the caller's list, but only once: later permutations
            # already see a length-12 list.
            if len(atom_list) == 9:
                atom_list.extend(['BLANK_ATOM'] * window)
            new_atom_list = []
            for w in perm:
                start = (w - 1) * window
                new_atom_list.extend(atom_list[start:start + window])
            new_bb_atoms.append(new_atom_list)
        permuted_bb_atoms.append(new_bb_atoms)
    return permuted_bb_atoms
## Testing this function from permute_training_ex.py
def permute_training_ex(training_ex):
    """Apply one fixed, seeded permutation of the four 3-row/3-column
    residue windows to every channel of a training example.

    Args:
        training_ex: an array of shape (44, 12, 12), representing 44
            channels of a 12x12 matrix.

    Returns:
        numpy.ndarray of the permuted channels (float64, since each channel
        is rebuilt by assignment into a fresh zeros array).

    Note:
        random.seed(0) reseeds the global RNG, so the chosen permutation is
        always the same.
    """
    perms = list(itertools.permutations([1, 2, 3, 4]))
    random.seed(0)
    perm = random.choice(perms)
    out_channels = []
    for channel in training_ex:
        # First reorder the row windows, then the column windows.
        rows = numpy.zeros([12, 12])
        for dst, w in enumerate(perm):
            rows[dst * 3:dst * 3 + 3, :] = channel[(w - 1) * 3:(w - 1) * 3 + 3, :]
        full = numpy.zeros([12, 12])
        for dst, w in enumerate(perm):
            full[:, dst * 3:dst * 3 + 3] = rows[:, (w - 1) * 3:(w - 1) * 3 + 3]
        out_channels.append(full)
    return numpy.array(out_channels)
def add_seq_channels(channel_mat, atom_list):
    """Append 40 one-hot sequence channels to the 4 distance channels.

    Params:
        channel_mat: array of shape (4, 12, 12): the distance maps between
            backbone atom types.
        atom_list: one sublist of p_bb_atoms; a list of Atom() objects (or
            'BLANK_ATOM' placeholder strings) of a single backbone atom
            type, used for .getResname() of each residue.
    Returns:
        full_mat: numpy array of shape (44, 12, 12): channel_mat followed by
            40 sequence channels. The first 20 encode each residue as a
            horizontal row of 1's in the channel of its amino-acid type; the
            next 20 encode the same information as vertical columns.
    """
    # Three-letter codes ordered alphabetically by the amino acid's FULL name.
    threelettercodes = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',\
                        'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL']
    # Always 12x12: negative examples' atom_list is sometimes of length 9.
    seq_channels = numpy.zeros([12, 12, 40], dtype=int)
    n = len(atom_list)
    for i in range(n):
        atom = atom_list[i]
        if atom == 'BLANK_ATOM':
            continue
        # Narrowed from a bare `except:` — only the expected failures are
        # caught: a non-Atom entry without getResname (AttributeError) or an
        # unknown residue name (ValueError from list.index).
        try:
            idx = threelettercodes.index(atom.getResname())
        except (AttributeError, ValueError):
            print('Resname of following atom not found:')
            print(atom)
            continue
        for j in range(n):
            seq_channels[i][j][idx] = 1       # horizontal rows of 1's in first 20 channels
            seq_channels[j][i][idx + 20] = 1  # vertical columns of 1's in next 20 channels
    full_mat = [channel_mat[i, :, :] for i in range(4)]
    full_mat.extend(seq_channels[:, :, i] for i in range(40))
    return numpy.array(full_mat)
'''
workdir = os.getcwd()
## This path is to the set of 63 negative examples, from the neg_zn_binding_examples folder
# pdb_path = '/output_1a2o/'
pdb_path = '/metal_decoys/'
if __name__ == '__main__':
opts = docopt(__doc__)
more_than_four_coord_res_pdbs = []
two_coord_res_pdbs = []
k = 0
for pdb in os.listdir(workdir + pdb_path):
k+=1
print(k)
if k < 18434:
continue
if '.DS_Store' in pdb:
continue
# pdb = '1a0b_ZN_1.pdb'
# If -p option is used, set pdb to that file
if opts["--pdb"]:
pdb = str(opts["--pdb"])
print(pdb)
# The absolute path to the metal core pdb file
full_path = workdir + pdb_path + pdb
# 7.6.21. bb_atoms is now a tuple of 4 lists. N, CA, C, and CB atoms.
try:
bb_atoms = get_bb_atoms(full_path)
if bb_atoms == 'Failed to parse PDB':
print('Failed to parse PDB file')
continue
except:
continue
# print('Length of bb_atoms is:')
# print(len(bb_atoms[0]))
# print(len(bb_atoms[1]))
# print(len(bb_atoms[2]))
# print(len(bb_atoms[3]))
# print('Each sublist of bb_atoms:')
# print(bb_atoms[0])
# print(bb_atoms[1])
# print(bb_atoms[2])
# print(bb_atoms[3])
## Adding this condition to avoid error when there are more than 4 coord residues
if len(bb_atoms[0]) > 12:
s = '-/-/-/-/- ' + str(pdb) + ' has more than 12 bb atoms -/-/-/-/-/-'
print(s)
more_than_four_coord_res_pdbs.append(pdb)
continue
## Adding this condition to avoid error when there are only 2 coordinating residues
if len(bb_atoms[0]) < 9:
s = '-/-/-/-/-/-' + str(pdb) + ' has fewer than 9 bb atoms -/-/-/-/-/-'
print(s)
two_coord_res_pdbs.append(pdb)
continue
out_folder = 'decoy_training_examples/set_4/'
os.makedirs(out_folder, exist_ok=True)
p_out_folder = out_folder + '/' + pdb.split('.')[0] + '/'
os.makedirs(p_out_folder, exist_ok=True)
channel_mat = []
# Calculate a distance matrix for each atom type
for atom_list in bb_atoms:
if len(atom_list) == 0:
continue
if 'BLANK_ATOM' in atom_list[0]:
mat_type = atom_list[-1].getName()
else:
mat_type = atom_list[0].getName()
mat = get_dist_mat(atom_list)
# print('MAT SHAPE IS:')
# print(mat.shape)
channel_mat.append(mat)
# print(channel_mat)
plot_dist_mat(mat, pdb, mat_type, p_out_folder)
write_dist_mat_file(mat, pdb, mat_type, p_out_folder)
write_res_info_file(atom_list, pdb, mat_type, p_out_folder)
# clear pylab workspace for next dmap
pylab.close()
### 7.20.21: After I've appended the 4 atom type matrices to channel_mat, I need to add the next 40 sequence channels.
channel_mat = numpy.array(channel_mat)
full_mat = add_seq_channels(channel_mat, atom_list)
print('Channel mat shape:')
print(channel_mat.shape)
print('Full mat shape:')
print(full_mat.shape)
write_dist_mat_file(channel_mat, pdb, 'all_channels', p_out_folder)
write_dist_mat_file(full_mat, pdb, 'all_channels', p_out_folder, full_mat=True)
write_pickle_file(full_mat, pdb, 'all_channels', p_out_folder)
# clear pylab workspace for next dmap
pylab.close()
# with open('two_coord_res_pdbs.pkl', 'wb') as f:
# pickle.dump(two_coord_res_pdbs, f)
# break
'''
def run_get_neg_dist_mats(workdir, pdb_path, out_path, opts):
    """Batch driver: build per-atom-type distance maps and the stacked
    44-channel training example for every metal-core PDB in a directory.

    Args:
        workdir: root working directory.
        pdb_path: sub-path (appended to workdir) containing the core PDBs.
        out_path: sub-path (appended to workdir) for outputs; one sub-folder
            per PDB is created inside it.
        opts: docopt options dict, forwarded to get_dist_mat/plot_dist_mat;
            when opts["--pdb"] is set, every directory entry is replaced by
            that single file name.

    Side effects: creates folders, writes PNG/txt/pkl files, prints progress.
    PDBs with more than 12 or fewer than 9 backbone atoms per type are
    skipped (tracked in local lists that are currently not persisted).
    """
    #opts = docopt(__doc__)
    more_than_four_coord_res_pdbs = []
    two_coord_res_pdbs = []
    k = 0  # progress counter for console output
    for pdb in os.listdir(workdir + pdb_path):
        k+=1
        print(k)
        if '.DS_Store' in pdb:
            continue
        # pdb = '1a0b_ZN_1.pdb'
        # If -p option is used, set pdb to that file
        if opts["--pdb"]:
            pdb = str(opts["--pdb"])
        print(pdb)
        # The absolute path to the metal core pdb file
        full_path = workdir + pdb_path + pdb
        # 7.6.21. bb_atoms is now a tuple of 4 lists. N, CA, C, and CB atoms.
        # try:
        #     bb_atoms = get_bb_atoms(full_path)
        #     if bb_atoms == 'Failed to parse PDB':
        #         print('Failed to parse PDB file')
        #         continue
        # except:
        #     print('Error: ' + full_path)
        #     continue
        bb_atoms = get_bb_atoms(full_path)
        if bb_atoms == 'Failed to parse PDB':
            print('Failed to parse PDB file')
            continue
        # print('Length of bb_atoms is:')
        # print(len(bb_atoms[0]))
        # print(len(bb_atoms[1]))
        # print(len(bb_atoms[2]))
        # print(len(bb_atoms[3]))
        # print('Each sublist of bb_atoms:')
        # print(bb_atoms[0])
        # print(bb_atoms[1])
        # print(bb_atoms[2])
        # print(bb_atoms[3])
        ## Adding this condition to avoid error when there are more than 4 coord residues
        if len(bb_atoms[0]) > 12:
            s = '-/-/-/-/- ' + str(pdb) + ' has more than 12 bb atoms -/-/-/-/-/-'
            print(s)
            more_than_four_coord_res_pdbs.append(pdb)
            continue
        ## Adding this condition to avoid error when there are only 2 coordinating residues
        if len(bb_atoms[0]) < 9:
            s = '-/-/-/-/-/-' + str(pdb) + ' has fewer than 9 bb atoms -/-/-/-/-/-'
            print(s)
            two_coord_res_pdbs.append(pdb)
            continue
        out_folder = workdir + out_path
        os.makedirs(out_folder, exist_ok=True)
        p_out_folder = out_folder + '/' + pdb.split('.')[0] + '/'
        os.makedirs(p_out_folder, exist_ok=True)
        channel_mat = []
        # Calculate a distance matrix for each atom type
        for atom_list in bb_atoms:
            if len(atom_list) == 0:
                continue
            # Pick the atom-type label from a real Atom (skip a leading pad).
            if 'BLANK_ATOM' in atom_list[0]:
                mat_type = atom_list[-1].getName()
            else:
                mat_type = atom_list[0].getName()
            mat = get_dist_mat(opts, atom_list)
            # print('MAT SHAPE IS:')
            # print(mat.shape)
            channel_mat.append(mat)
            # print(channel_mat)
            plot_dist_mat(opts, mat, pdb, mat_type, p_out_folder)
            write_dist_mat_file(mat, pdb, mat_type, p_out_folder)
            write_res_info_file(atom_list, pdb, mat_type, p_out_folder)
            # clear pylab workspace for next dmap
            pylab.close()
        ### 7.20.21: After I've appended the 4 atom type matrices to channel_mat, I need to add the next 40 sequence channels.
        channel_mat = numpy.array(channel_mat)
        # NOTE(review): atom_list here is whatever list the loop above ended
        # on (normally the CB list) — presumably intentional, since residue
        # names are the same across atom types; confirm.
        full_mat = add_seq_channels(channel_mat, atom_list)
        print('Channel mat shape:')
        print(channel_mat.shape)
        print('Full mat shape:')
        print(full_mat.shape)
        write_dist_mat_file(channel_mat, pdb, 'all_channels', p_out_folder)
        write_dist_mat_file(full_mat, pdb, 'all_channels', p_out_folder, full_mat=True)
        write_pickle_file(full_mat, pdb, 'all_channels', p_out_folder)
        # clear pylab workspace for next dmap
        pylab.close()
        # with open('two_coord_res_pdbs.pkl', 'wb') as f:
        #     pickle.dump(two_coord_res_pdbs, f)
# break | [
"pylab.close",
"matplotlib.rc",
"pickle.dump",
"numpy.ma.masked_greater_equal",
"numpy.linalg.norm",
"pylab.gcf",
"pylab.tick_params",
"numpy.set_printoptions",
"pylab.pcolormesh",
"itertools.permutations",
"pylab.ylabel",
"random.seed",
"pylab.ylim",
"pylab.xlabel",
"prody.parsePDB",
... | [((7313, 7351), 'numpy.zeros', 'numpy.zeros', (['(12, 12)'], {'dtype': '"""float64"""'}), "((12, 12), dtype='float64')\n", (7324, 7351), False, 'import numpy\n'), ((8518, 8556), 'numpy.linalg.norm', 'numpy.linalg.norm', (['(a_coords - b_coords)'], {}), '(a_coords - b_coords)\n', (8535, 8556), False, 'import numpy\n'), ((9028, 9039), 'pylab.gca', 'pylab.gca', ([], {}), '()\n', (9037, 9039), False, 'import pylab\n'), ((9498, 9537), 'matplotlib.rc', 'mpl.rc', (['"""xtick"""'], {}), "('xtick', **{'direction': 'out'})\n", (9504, 9537), True, 'import matplotlib as mpl\n'), ((9541, 9580), 'matplotlib.rc', 'mpl.rc', (['"""ytick"""'], {}), "('ytick', **{'direction': 'out'})\n", (9547, 9580), True, 'import matplotlib as mpl\n'), ((9584, 9658), 'matplotlib.rc', 'mpl.rc', (['"""legend"""'], {'frameon': '(False)', 'fontsize': "font_kwargs['size']", 'numpoints': '(1)'}), "('legend', frameon=False, fontsize=font_kwargs['size'], numpoints=1)\n", (9590, 9658), True, 'import matplotlib as mpl\n'), ((9660, 9689), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {}), "('font', **font_kwargs)\n", (9666, 9689), True, 'import matplotlib as mpl\n'), ((9692, 9744), 'pylab.tick_params', 'pylab.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'top': '"""off"""'}), "(axis='x', which='both', top='off')\n", (9709, 9744), False, 'import pylab\n'), ((9746, 9800), 'pylab.tick_params', 'pylab.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'right': '"""off"""'}), "(axis='y', which='both', right='off')\n", (9763, 9800), False, 'import pylab\n'), ((14452, 14495), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'threshold': 'numpy.inf'}), '(threshold=numpy.inf)\n', (14474, 14495), False, 'import numpy\n'), ((14980, 15018), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'threshold': '(1000)'}), '(threshold=1000)\n', (15002, 15018), False, 'import numpy\n'), ((15256, 15299), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'threshold': 
'numpy.inf'}), '(threshold=numpy.inf)\n', (15278, 15299), False, 'import numpy\n'), ((17009, 17023), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (17020, 17023), False, 'import random\n'), ((17032, 17052), 'random.choice', 'random.choice', (['perms'], {}), '(perms)\n', (17045, 17052), False, 'import random\n'), ((17786, 17814), 'numpy.array', 'numpy.array', (['new_training_ex'], {}), '(new_training_ex)\n', (17797, 17814), False, 'import numpy\n'), ((18717, 18753), 'numpy.zeros', 'numpy.zeros', (['[12, 12, 40]'], {'dtype': 'int'}), '([12, 12, 40], dtype=int)\n', (18728, 18753), False, 'import numpy\n'), ((19571, 19592), 'numpy.array', 'numpy.array', (['full_mat'], {}), '(full_mat)\n', (19582, 19592), False, 'import numpy\n'), ((23013, 23043), 'os.listdir', 'os.listdir', (['(workdir + pdb_path)'], {}), '(workdir + pdb_path)\n', (23023, 23043), False, 'import os\n'), ((3220, 3237), 'prody.parsePDB', 'pr.parsePDB', (['path'], {}), '(path)\n', (3231, 3237), True, 'import prody as pr\n'), ((8181, 8228), 'numpy.ma.masked_greater_equal', 'numpy.ma.masked_greater_equal', (['mat', 'mask_thresh'], {}), '(mat, mask_thresh)\n', (8210, 8228), False, 'import numpy\n'), ((11780, 11797), 'pylab.xlim', 'pylab.xlim', (['(0)', '(12)'], {}), '(0, 12)\n', (11790, 11797), False, 'import pylab\n'), ((11816, 11833), 'pylab.ylim', 'pylab.ylim', (['(0)', '(12)'], {}), '(0, 12)\n', (11826, 11833), False, 'import pylab\n'), ((11853, 11926), 'pylab.xlabel', 'pylab.xlabel', (["(mat_type + ' for Coordinating Residues and their neighbors')"], {}), "(mat_type + ' for Coordinating Residues and their neighbors')\n", (11865, 11926), False, 'import pylab\n'), ((11949, 12022), 'pylab.ylabel', 'pylab.ylabel', (["(mat_type + ' for Coordinating Residues and their neighbors')"], {}), "(mat_type + ' for Coordinating Residues and their neighbors')\n", (11961, 12022), False, 'import pylab\n'), ((15435, 15459), 'pickle.dump', 'pickle.dump', (['full_mat', 'f'], {}), '(full_mat, f)\n', (15446, 15459), 
False, 'import pickle\n'), ((15521, 15557), 'itertools.permutations', 'itertools.permutations', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (15543, 15557), False, 'import itertools\n'), ((16970, 17006), 'itertools.permutations', 'itertools.permutations', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (16992, 17006), False, 'import itertools\n'), ((17123, 17144), 'numpy.zeros', 'numpy.zeros', (['[12, 12]'], {}), '([12, 12])\n', (17134, 17144), False, 'import numpy\n'), ((17431, 17452), 'numpy.zeros', 'numpy.zeros', (['[12, 12]'], {}), '([12, 12])\n', (17442, 17452), False, 'import numpy\n'), ((24553, 24591), 'os.makedirs', 'os.makedirs', (['out_folder'], {'exist_ok': '(True)'}), '(out_folder, exist_ok=True)\n', (24564, 24591), False, 'import os\n'), ((24655, 24695), 'os.makedirs', 'os.makedirs', (['p_out_folder'], {'exist_ok': '(True)'}), '(p_out_folder, exist_ok=True)\n', (24666, 24695), False, 'import os\n'), ((25475, 25499), 'numpy.array', 'numpy.array', (['channel_mat'], {}), '(channel_mat)\n', (25486, 25499), False, 'import numpy\n'), ((25925, 25938), 'pylab.close', 'pylab.close', ([], {}), '()\n', (25936, 25938), False, 'import pylab\n'), ((10440, 10458), 'sys.stderr.write', 'sys.stderr.write', ([], {}), '()\n', (10456, 10458), False, 'import sys\n'), ((12056, 12067), 'pylab.gca', 'pylab.gca', ([], {}), '()\n', (12065, 12067), False, 'import pylab\n'), ((12069, 12080), 'pylab.gcf', 'pylab.gcf', ([], {}), '()\n', (12078, 12080), False, 'import pylab\n'), ((12480, 12547), 'pylab.pcolormesh', 'pylab.pcolormesh', (['mat'], {'shading': '"""flat"""', 'edgecolors': '"""None"""', 'cmap': 'cmap'}), "(mat, shading='flat', edgecolors='None', cmap=cmap)\n", (12496, 12547), False, 'import pylab\n'), ((25323, 25336), 'pylab.close', 'pylab.close', ([], {}), '()\n', (25334, 25336), False, 'import pylab\n'), ((11615, 11626), 'pylab.gcf', 'pylab.gcf', ([], {}), '()\n', (11624, 11626), False, 'import pylab\n'), ((11673, 11684), 'pylab.gcf', 'pylab.gcf', ([], {}), '()\n', (11682, 
11684), False, 'import pylab\n'), ((12750, 12799), 'pylab.colorbar', 'pylab.colorbar', (['map_obj'], {'drawedges': '(False)', 'cax': 'cax'}), '(map_obj, drawedges=False, cax=cax)\n', (12764, 12799), False, 'import pylab\n'), ((12840, 12876), 'pylab.ylabel', 'pylab.ylabel', (['"""Distance (angstroms)"""'], {}), "('Distance (angstroms)')\n", (12852, 12876), False, 'import pylab\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gym.spaces import Box
import numpy as np
import logging
import ray
import ray.experimental.tf_utils
from ray.rllib.agents.sac.sac_model import SACModel
from ray.rllib.agents.ddpg.noop_model import NoopModel
from ray.rllib.agents.dqn.dqn_policy import _postprocess_dqn, PRIO_WEIGHTS
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils import try_import_tf, try_import_tfp
from ray.rllib.utils.tf_ops import minimize_and_clip
# TF / TFP are imported via helper functions so this module can still be
# imported when they are not installed — presumably the helpers return None
# then; confirm against ray.rllib.utils.
tf = try_import_tf()
tfp = try_import_tfp()
logger = logging.getLogger(__name__)
def build_sac_model(policy, obs_space, action_space, config):
    """Build the SAC policy model and its target copy.

    Validates the action space (SAC supports only 1-D Box spaces), decides
    whether a state preprocessor is used, and attaches ``policy.model`` and
    ``policy.target_model``.

    Args:
        policy: The policy object being constructed.
        obs_space: Observation space of the environment.
        action_space: Action space; must be a 1-D ``gym.spaces.Box``.
        config: Trainer config dict.

    Returns:
        The (non-target) SAC model.

    Raises:
        UnsupportedSpaceException: If the action space is not a 1-D Box.
    """
    if config["model"]["custom_model"]:
        logger.warning(
            "Setting use_state_preprocessor=True since a custom model "
            "was specified.")
        config["use_state_preprocessor"] = True
    if not isinstance(action_space, Box):
        raise UnsupportedSpaceException(
            "Action space {} is not supported for SAC.".format(action_space))
    if len(action_space.shape) > 1:
        raise UnsupportedSpaceException(
            "Action space has multiple dimensions "
            "{}. ".format(action_space.shape) +
            "Consider reshaping this into a single dimension, "
            "using a Tuple action space, or the multi-agent API.")

    if config["use_state_preprocessor"]:
        default_model = None  # catalog decides
        num_outputs = 256  # arbitrary
        config["model"]["no_final_linear"] = True
    else:
        default_model = NoopModel
        num_outputs = int(np.product(obs_space.shape))

    def _make_model(name):
        # Policy and target models are identical except for their variable
        # scope name; build both from one helper so the two calls cannot
        # drift apart.
        return ModelCatalog.get_model_v2(
            obs_space,
            action_space,
            num_outputs,
            config["model"],
            framework="tf",
            model_interface=SACModel,
            default_model=default_model,
            name=name,
            actor_hidden_activation=config["policy_model"][
                "hidden_activation"],
            actor_hiddens=config["policy_model"]["hidden_layer_sizes"],
            critic_hidden_activation=config["Q_model"]["hidden_activation"],
            critic_hiddens=config["Q_model"]["hidden_layer_sizes"],
            twin_q=config["twin_q"])

    policy.model = _make_model("sac_model")
    policy.target_model = _make_model("target_sac_model")
    return policy.model
def postprocess_trajectory(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Delegate to the shared DQN-style batch postprocessing helper."""
    return _postprocess_dqn(policy, sample_batch)
def exploration_setting_inputs(policy):
    # Feed the exploration flag into the `stochastic` placeholder so the
    # action ops switch between stochastic and deterministic sampling.
    return {
        policy.stochastic: policy.config["exploration_enabled"],
    }
def build_action_output(policy, model, input_dict, obs_space, action_space,
                        config):
    """Build the TF ops that produce actions (and their log-probs) from obs.

    The `policy.stochastic` placeholder selects at session-run time between
    the stochastic policy output and the deterministic (mean) output.
    """
    model_out, _ = model({
        "obs": input_dict[SampleBatch.CUR_OBS],
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)
    def unsquash_actions(actions):
        # Use sigmoid to scale to [0,1], but also double magnitude of input to
        # emulate behaviour of tanh activation used in SAC and TD3 papers.
        sigmoid_out = tf.nn.sigmoid(2 * actions)
        # Rescale to actual env policy scale
        # (shape of sigmoid_out is [batch_size, dim_actions], so we reshape to
        # get same dims)
        action_range = (action_space.high - action_space.low)[None]
        low_action = action_space.low[None]
        unsquashed_actions = action_range * sigmoid_out + low_action
        return unsquashed_actions
    squashed_stochastic_actions, log_pis = policy.model.get_policy_output(
        model_out, deterministic=False)
    stochastic_actions = unsquash_actions(squashed_stochastic_actions)
    squashed_deterministic_actions, _ = policy.model.get_policy_output(
        model_out, deterministic=True)
    deterministic_actions = unsquash_actions(squashed_deterministic_actions)
    # Deterministic actions report an all-zero log-prob tensor.
    actions = tf.cond(policy.stochastic, lambda: stochastic_actions,
                      lambda: deterministic_actions)
    action_probabilities = tf.cond(policy.stochastic, lambda: log_pis,
                                   lambda: tf.zeros_like(log_pis))
    policy.output_actions = actions
    return actions, action_probabilities
def actor_critic_loss(policy, batch_tensors):
    """Build the combined SAC loss (critic TD loss + actor loss + alpha loss).

    Also caches `q_t`, `td_error` and the three loss terms on the policy for
    the stats and gradients functions.

    Returns:
        A single scalar tensor: actor_loss + critic_loss + alpha_loss (the
        custom apply op later handles the three terms separately).
    """
    model_out_t, _ = policy.model({
        "obs": batch_tensors[SampleBatch.CUR_OBS],
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)

    model_out_tp1, _ = policy.model({
        "obs": batch_tensors[SampleBatch.NEXT_OBS],
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)

    target_model_out_tp1, _ = policy.target_model({
        "obs": batch_tensors[SampleBatch.NEXT_OBS],
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)

    # TODO(hartikainen): figure actions and log pis
    policy_t, log_pis_t = policy.model.get_policy_output(model_out_t)
    policy_tp1, log_pis_tp1 = policy.model.get_policy_output(model_out_tp1)

    log_alpha = policy.model.log_alpha
    alpha = policy.model.alpha

    # q network evaluation
    q_t = policy.model.get_q_values(model_out_t,
                                    batch_tensors[SampleBatch.ACTIONS])
    if policy.config["twin_q"]:
        twin_q_t = policy.model.get_twin_q_values(
            model_out_t, batch_tensors[SampleBatch.ACTIONS])

    # Q-values for current policy (no noise) in given current state
    q_t_det_policy = policy.model.get_q_values(model_out_t, policy_t)

    # target q network evaluation
    q_tp1 = policy.target_model.get_q_values(target_model_out_tp1, policy_tp1)
    if policy.config["twin_q"]:
        twin_q_tp1 = policy.target_model.get_twin_q_values(
            target_model_out_tp1, policy_tp1)

    q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
    if policy.config["twin_q"]:
        twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
        q_tp1 = tf.minimum(q_tp1, twin_q_tp1)

    # BUGFIX: the soft Bellman target subtracts the entropy term of the
    # *next*-state action, alpha * log pi(a'|s') (Haarnoja et al., 2018);
    # the original code subtracted the current-state log_pis_t here.
    q_tp1 -= tf.expand_dims(alpha * log_pis_tp1, 1)

    q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
    q_tp1_best_masked = (1.0 - tf.cast(batch_tensors[SampleBatch.DONES],
                                       tf.float32)) * q_tp1_best

    assert policy.config["n_step"] == 1, "TODO(hartikainen) n_step > 1"

    # compute RHS of bellman equation
    q_t_selected_target = tf.stop_gradient(
        batch_tensors[SampleBatch.REWARDS] +
        policy.config["gamma"]**policy.config["n_step"] * q_tp1_best_masked)

    # compute the error (potentially clipped)
    if policy.config["twin_q"]:
        td_error = q_t_selected - q_t_selected_target
        twin_td_error = twin_q_t_selected - q_t_selected_target
        # Combined TD error is what prioritized replay uses for priorities.
        td_error = td_error + twin_td_error
        errors = 0.5 * (tf.square(td_error) + tf.square(twin_td_error))
    else:
        td_error = q_t_selected - q_t_selected_target
        errors = 0.5 * tf.square(td_error)

    critic_loss = policy.model.custom_loss(
        tf.reduce_mean(batch_tensors[PRIO_WEIGHTS] * errors), batch_tensors)
    actor_loss = tf.reduce_mean(alpha * log_pis_t - q_t_det_policy)

    # "auto" target entropy defaults to -|A| (negative action dimensionality).
    target_entropy = (-np.prod(policy.action_space.shape)
                      if policy.config["target_entropy"] == "auto" else
                      policy.config["target_entropy"])
    alpha_loss = -tf.reduce_mean(
        log_alpha * tf.stop_gradient(log_pis_t + target_entropy))

    # save for stats function
    policy.q_t = q_t
    policy.td_error = td_error
    policy.actor_loss = actor_loss
    policy.critic_loss = critic_loss
    policy.alpha_loss = alpha_loss

    # in a custom apply op we handle the losses separately, but return them
    # combined in one loss for now
    return actor_loss + critic_loss + alpha_loss
def gradients(policy, optimizer, loss):
    """Compute gradients for each loss term with its dedicated optimizer.

    Caches the per-term (grad, var) lists on the policy for the custom apply
    op, and returns them concatenated.
    """
    if policy.config["grad_norm_clipping"] is not None:
        actor_grads_and_vars = minimize_and_clip(
            policy._actor_optimizer,
            policy.actor_loss,
            var_list=policy.model.policy_variables(),
            clip_val=policy.config["grad_norm_clipping"])
        critic_grads_and_vars = minimize_and_clip(
            policy._critic_optimizer,
            policy.critic_loss,
            var_list=policy.model.q_variables(),
            clip_val=policy.config["grad_norm_clipping"])
        alpha_grads_and_vars = minimize_and_clip(
            policy._alpha_optimizer,
            policy.alpha_loss,
            var_list=policy.model.alpha,
            clip_val=policy.config["grad_norm_clipping"])
    else:
        actor_grads_and_vars = policy._actor_optimizer.compute_gradients(
            policy.actor_loss, var_list=policy.model.policy_variables())
        critic_grads_and_vars = policy._critic_optimizer.compute_gradients(
            policy.critic_loss, var_list=policy.model.q_variables())
        # BUGFIX: was policy._critic_optimizer (copy-paste error). Alpha
        # gradients must come from the alpha optimizer, matching the clipped
        # branch above and the optimizer that later applies them.
        alpha_grads_and_vars = policy._alpha_optimizer.compute_gradients(
            policy.alpha_loss, var_list=policy.model.alpha)

    # save these for later use in build_apply_op; drop vars without gradients
    policy._actor_grads_and_vars = [(g, v) for (g, v) in actor_grads_and_vars
                                    if g is not None]
    policy._critic_grads_and_vars = [(g, v) for (g, v) in critic_grads_and_vars
                                     if g is not None]
    policy._alpha_grads_and_vars = [(g, v) for (g, v) in alpha_grads_and_vars
                                    if g is not None]
    grads_and_vars = (
        policy._actor_grads_and_vars + policy._critic_grads_and_vars +
        policy._alpha_grads_and_vars)
    return grads_and_vars
def stats(policy, batch_tensors):
    """Scalar training statistics reported for this policy."""
    q_t = policy.q_t
    metrics = {}
    metrics["td_error"] = tf.reduce_mean(policy.td_error)
    metrics["actor_loss"] = tf.reduce_mean(policy.actor_loss)
    metrics["critic_loss"] = tf.reduce_mean(policy.critic_loss)
    metrics["mean_q"] = tf.reduce_mean(q_t)
    metrics["max_q"] = tf.reduce_max(q_t)
    metrics["min_q"] = tf.reduce_min(q_t)
    return metrics
class ExplorationStateMixin(object):
    def __init__(self, obs_space, action_space, config):
        # Boolean placeholder toggling stochastic vs. deterministic actions.
        self.stochastic = tf.placeholder(tf.bool, (), name="stochastic")
    def set_epsilon(self, epsilon):
        # Epsilon-greedy is not used by SAC; no-op kept for API compatibility.
        pass
class TargetNetworkMixin(object):
    def __init__(self, config):
        # update_target_fn will be called periodically to copy Q network to
        # target Q network
        self.tau_value = config.get("tau")
        # Placeholder so one op serves both soft (tau < 1) and hard (tau = 1)
        # syncs.
        self.tau = tf.placeholder(tf.float32, (), name="tau")
        update_target_expr = []
        model_vars = self.model.trainable_variables()
        target_model_vars = self.target_model.trainable_variables()
        # Both models are built identically, so variables pair up 1:1.
        assert len(model_vars) == len(target_model_vars), \
            (model_vars, target_model_vars)
        for var, var_target in zip(model_vars, target_model_vars):
            # Polyak averaging: target <- tau * online + (1 - tau) * target.
            update_target_expr.append(
                var_target.assign(self.tau * var +
                                  (1.0 - self.tau) * var_target))
            logger.debug("Update target op {}".format(var_target))
        self.update_target_expr = tf.group(*update_target_expr)
        # Hard initial update
        self.update_target(tau=1.0)
    # support both hard and soft sync
    def update_target(self, tau=None):
        # tau=None falls back to the configured soft-sync tau.
        tau = tau or self.tau_value
        return self.get_session().run(
            self.update_target_expr, feed_dict={self.tau: tau})
class ActorCriticOptimizerMixin(object):
    """Creates the global step and one Adam optimizer per loss term
    (actor, critic(s), entropy temperature alpha)."""

    def __init__(self, config):
        # Global step counts the number of update operations applied.
        self.global_step = tf.train.get_or_create_global_step()
        # Each loss term gets its own optimizer with its own learning rate.
        opt_config = config["optimization"]
        self._actor_optimizer = tf.train.AdamOptimizer(
            learning_rate=opt_config["actor_learning_rate"])
        self._critic_optimizer = tf.train.AdamOptimizer(
            learning_rate=opt_config["critic_learning_rate"])
        self._alpha_optimizer = tf.train.AdamOptimizer(
            learning_rate=opt_config["entropy_learning_rate"])
class ComputeTDErrorMixin(object):
    def compute_td_error(self, obs_t, act_t, rew_t, obs_tp1, done_mask,
                         importance_weights):
        """Evaluate per-sample TD errors for a batch (used by prioritized
        replay to update priorities). Returns zeros until the loss graph
        has been initialized."""
        if not self.loss_initialized():
            return np.zeros_like(rew_t)
        td_err = self.get_session().run(
            self.td_error,
            feed_dict={
                # Observations may arrive as lists; coerce each to ndarray.
                self.get_placeholder(SampleBatch.CUR_OBS): [
                    np.array(ob) for ob in obs_t
                ],
                self.get_placeholder(SampleBatch.ACTIONS): act_t,
                self.get_placeholder(SampleBatch.REWARDS): rew_t,
                self.get_placeholder(SampleBatch.NEXT_OBS): [
                    np.array(ob) for ob in obs_tp1
                ],
                self.get_placeholder(SampleBatch.DONES): done_mask,
                self.get_placeholder(PRIO_WEIGHTS): importance_weights
            })
        return td_err
def setup_early_mixins(policy, obs_space, action_space, config):
    # Runs before loss construction: the `stochastic` placeholder and the
    # optimizers must exist before the model/loss graph is built.
    ExplorationStateMixin.__init__(policy, obs_space, action_space, config)
    ActorCriticOptimizerMixin.__init__(policy, config)
def setup_late_mixins(policy, obs_space, action_space, config):
    # Runs after model creation: target-sync ops need both models built.
    TargetNetworkMixin.__init__(policy, config)
# Assemble the SAC TF policy from the builder functions above, wiring the
# mixins in before (placeholders, optimizers) and after (target net) init.
SACTFPolicy = build_tf_policy(
    name="SACTFPolicy",
    get_default_config=lambda: ray.rllib.agents.sac.sac.DEFAULT_CONFIG,
    make_model=build_sac_model,
    postprocess_fn=postprocess_trajectory,
    extra_action_feed_fn=exploration_setting_inputs,
    action_sampler_fn=build_action_output,
    loss_fn=actor_critic_loss,
    stats_fn=stats,
    gradients_fn=gradients,
    extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
    mixins=[
        TargetNetworkMixin, ExplorationStateMixin, ActorCriticOptimizerMixin,
        ComputeTDErrorMixin
    ],
    before_init=setup_early_mixins,
    after_init=setup_late_mixins,
    obs_include_prev_action_reward=False)
| [
"ray.rllib.utils.try_import_tf",
"numpy.zeros_like",
"ray.rllib.utils.try_import_tfp",
"ray.rllib.utils.tf_ops.minimize_and_clip",
"ray.rllib.agents.dqn.dqn_policy._postprocess_dqn",
"numpy.prod",
"ray.rllib.models.ModelCatalog.get_model_v2",
"numpy.product",
"numpy.array",
"ray.rllib.policy.tf_po... | [((735, 750), 'ray.rllib.utils.try_import_tf', 'try_import_tf', ([], {}), '()\n', (748, 750), False, 'from ray.rllib.utils import try_import_tf, try_import_tfp\n'), ((757, 773), 'ray.rllib.utils.try_import_tfp', 'try_import_tfp', ([], {}), '()\n', (771, 773), False, 'from ray.rllib.utils import try_import_tf, try_import_tfp\n'), ((783, 810), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (800, 810), False, 'import logging\n'), ((13912, 14546), 'ray.rllib.policy.tf_policy_template.build_tf_policy', 'build_tf_policy', ([], {'name': '"""SACTFPolicy"""', 'get_default_config': '(lambda : ray.rllib.agents.sac.sac.DEFAULT_CONFIG)', 'make_model': 'build_sac_model', 'postprocess_fn': 'postprocess_trajectory', 'extra_action_feed_fn': 'exploration_setting_inputs', 'action_sampler_fn': 'build_action_output', 'loss_fn': 'actor_critic_loss', 'stats_fn': 'stats', 'gradients_fn': 'gradients', 'extra_learn_fetches_fn': "(lambda policy: {'td_error': policy.td_error})", 'mixins': '[TargetNetworkMixin, ExplorationStateMixin, ActorCriticOptimizerMixin,\n ComputeTDErrorMixin]', 'before_init': 'setup_early_mixins', 'after_init': 'setup_late_mixins', 'obs_include_prev_action_reward': '(False)'}), "(name='SACTFPolicy', get_default_config=lambda : ray.rllib.\n agents.sac.sac.DEFAULT_CONFIG, make_model=build_sac_model,\n postprocess_fn=postprocess_trajectory, extra_action_feed_fn=\n exploration_setting_inputs, action_sampler_fn=build_action_output,\n loss_fn=actor_critic_loss, stats_fn=stats, gradients_fn=gradients,\n extra_learn_fetches_fn=lambda policy: {'td_error': policy.td_error},\n mixins=[TargetNetworkMixin, ExplorationStateMixin,\n ActorCriticOptimizerMixin, ComputeTDErrorMixin], before_init=\n setup_early_mixins, after_init=setup_late_mixins,\n obs_include_prev_action_reward=False)\n", (13927, 14546), False, 'from ray.rllib.policy.tf_policy_template import build_tf_policy\n'), ((1856, 2330), 
'ray.rllib.models.ModelCatalog.get_model_v2', 'ModelCatalog.get_model_v2', (['obs_space', 'action_space', 'num_outputs', "config['model']"], {'framework': '"""tf"""', 'model_interface': 'SACModel', 'default_model': 'default_model', 'name': '"""sac_model"""', 'actor_hidden_activation': "config['policy_model']['hidden_activation']", 'actor_hiddens': "config['policy_model']['hidden_layer_sizes']", 'critic_hidden_activation': "config['Q_model']['hidden_activation']", 'critic_hiddens': "config['Q_model']['hidden_layer_sizes']", 'twin_q': "config['twin_q']"}), "(obs_space, action_space, num_outputs, config[\n 'model'], framework='tf', model_interface=SACModel, default_model=\n default_model, name='sac_model', actor_hidden_activation=config[\n 'policy_model']['hidden_activation'], actor_hiddens=config[\n 'policy_model']['hidden_layer_sizes'], critic_hidden_activation=config[\n 'Q_model']['hidden_activation'], critic_hiddens=config['Q_model'][\n 'hidden_layer_sizes'], twin_q=config['twin_q'])\n", (1881, 2330), False, 'from ray.rllib.models import ModelCatalog\n'), ((2433, 2914), 'ray.rllib.models.ModelCatalog.get_model_v2', 'ModelCatalog.get_model_v2', (['obs_space', 'action_space', 'num_outputs', "config['model']"], {'framework': '"""tf"""', 'model_interface': 'SACModel', 'default_model': 'default_model', 'name': '"""target_sac_model"""', 'actor_hidden_activation': "config['policy_model']['hidden_activation']", 'actor_hiddens': "config['policy_model']['hidden_layer_sizes']", 'critic_hidden_activation': "config['Q_model']['hidden_activation']", 'critic_hiddens': "config['Q_model']['hidden_layer_sizes']", 'twin_q': "config['twin_q']"}), "(obs_space, action_space, num_outputs, config[\n 'model'], framework='tf', model_interface=SACModel, default_model=\n default_model, name='target_sac_model', actor_hidden_activation=config[\n 'policy_model']['hidden_activation'], actor_hiddens=config[\n 'policy_model']['hidden_layer_sizes'], critic_hidden_activation=config[\n 
'Q_model']['hidden_activation'], critic_hiddens=config['Q_model'][\n 'hidden_layer_sizes'], twin_q=config['twin_q'])\n", (2458, 2914), False, 'from ray.rllib.models import ModelCatalog\n'), ((3199, 3237), 'ray.rllib.agents.dqn.dqn_policy._postprocess_dqn', '_postprocess_dqn', (['policy', 'sample_batch'], {}), '(policy, sample_batch)\n', (3215, 3237), False, 'from ray.rllib.agents.dqn.dqn_policy import _postprocess_dqn, PRIO_WEIGHTS\n'), ((9051, 9192), 'ray.rllib.utils.tf_ops.minimize_and_clip', 'minimize_and_clip', (['policy._alpha_optimizer', 'policy.alpha_loss'], {'var_list': 'policy.model.alpha', 'clip_val': "policy.config['grad_norm_clipping']"}), "(policy._alpha_optimizer, policy.alpha_loss, var_list=\n policy.model.alpha, clip_val=policy.config['grad_norm_clipping'])\n", (9068, 9192), False, 'from ray.rllib.utils.tf_ops import minimize_and_clip\n'), ((1807, 1834), 'numpy.product', 'np.product', (['obs_space.shape'], {}), '(obs_space.shape)\n', (1817, 1834), True, 'import numpy as np\n'), ((7851, 7885), 'numpy.prod', 'np.prod', (['policy.action_space.shape'], {}), '(policy.action_space.shape)\n', (7858, 7885), True, 'import numpy as np\n'), ((12901, 12921), 'numpy.zeros_like', 'np.zeros_like', (['rew_t'], {}), '(rew_t)\n', (12914, 12921), True, 'import numpy as np\n'), ((13096, 13108), 'numpy.array', 'np.array', (['ob'], {}), '(ob)\n', (13104, 13108), True, 'import numpy as np\n'), ((13358, 13370), 'numpy.array', 'np.array', (['ob'], {}), '(ob)\n', (13366, 13370), True, 'import numpy as np\n')] |
import numpy as np
from .MaterialBase import Material
from Florence.Tensor import trace, Voigt
class NeoHookeanBSmith(Material):
    """Stable Neo-Hookean internal energy (Smith et al. style):

        W(C) = mu/2*(C:I-3) + lamb/2*(J - alpha)**2 - mu/2*ln(C:I + 1)
    """
    def __init__(self, ndim, **kwargs):
        mtype = type(self).__name__
        super(NeoHookeanBSmith, self).__init__(mtype, ndim, **kwargs)
        self.is_transversely_isotropic = False
        self.energy_type = "internal_energy"
        self.nature = "nonlinear"
        self.fields = "mechanics"
        # Voigt size of the tangent: 6 in 3D, 3 in 2D.
        if self.ndim==3:
            self.H_VoigtSize = 6
        elif self.ndim==2:
            self.H_VoigtSize = 3
        # LOW LEVEL DISPATCHER
        # self.has_low_level_dispatcher = True
        self.has_low_level_dispatcher = False
    def KineticMeasures(self,F,ElectricFieldx=0, elem=0):
        # Low-level (compiled) dispatch path; imported lazily to avoid a hard
        # dependency when the dispatcher is disabled.
        from Florence.MaterialLibrary.LLDispatch._NeoHookean_ import KineticMeasures
        return KineticMeasures(self,F)
    def Hessian(self,StrainTensors,ElectricFieldx=None,elem=0,gcounter=0):
        """Spatial tangent (elasticity) tensor in Voigt form at one
        Gauss point."""
        I = StrainTensors['I']
        J = StrainTensors['J'][gcounter]
        if np.isclose(J, 0) or J < 0:
            # NOTE(review): this `delta` is unconditionally overwritten by
            # `delta = 1.` below — looks like a leftover from the commented-out
            # J regularization; confirm intent.
            delta = np.sqrt(0.04 * J * J + 1e-8);
            # J = 0.5 * (J + np.sqrt(J**2 + 4 *delta**2))
        mu = self.mu
        lamb = self.lamb
        b = StrainTensors['b'][gcounter]
        trb = np.trace(b)
        if self.ndim==2:
            # 2D plane-strain: add the out-of-plane unit stretch to tr(b).
            trb += 1
        delta = 1.
        alpha = 1 + 3./4. * mu / lamb
        C_Voigt = 2. * mu / J / (trb + delta)**2 * np.einsum("ij,kl", b, b) + 2 * lamb * J * (1. - alpha/2./J) * np.einsum("ij,kl", I, I) -\
                        lamb * (J - alpha) * (np.einsum("ik,jl", I, I) + np.einsum("il,jk", I, I) )
        C_Voigt = Voigt(C_Voigt,1)
        self.H_VoigtSize = C_Voigt.shape[0]
        return C_Voigt
    def CauchyStress(self,StrainTensors,ElectricFieldx=None,elem=0,gcounter=0):
        """Cauchy stress tensor at one Gauss point."""
        I = StrainTensors['I']
        J = StrainTensors['J'][gcounter]
        b = StrainTensors['b'][gcounter]
        if np.isclose(J, 0) or J < 0:
            # NOTE(review): dead code — `delta` is overwritten below; see
            # Hessian for the same pattern.
            delta = np.sqrt(0.04 * J * J + 1e-8);
            # J = 0.5 * (J + np.sqrt(J**2 + 4 *delta**2))
        mu = self.mu
        lamb = self.lamb
        trb = np.trace(b)
        if self.ndim==2:
            # 2D plane-strain correction to tr(b).
            trb += 1
        delta = 1.
        alpha = 1 + 3./4. * mu / lamb
        stress = mu / J * (1. - 1./(trb + delta)) * b + lamb * (J - alpha) * I
        return stress
    def InternalEnergy(self,StrainTensors,elem=0,gcounter=0):
        """Strain-energy density W at one Gauss point."""
        mu = self.mu
        lamb = self.lamb
        I = StrainTensors['I']
        J = StrainTensors['J'][gcounter]
        F = StrainTensors['F'][gcounter]
        # Right Cauchy-Green tensor C = F^T F.
        C = np.dot(F.T,F)
        if np.isclose(J, 0) or J < 0:
            # NOTE(review): `delta` is computed but never used in this method.
            delta = np.sqrt(0.04 * J * J + 1e-8);
            # J = 0.5 * (J + np.sqrt(J**2 + 4 *delta**2))
        alpha = 1 + 3./4. * mu / lamb
        energy = mu/2.*(trace(C) - 3.) - mu/2.*np.log(trace(C) + 1) + lamb/2.*(J-alpha)**2
        return energy
| [
"numpy.trace",
"numpy.einsum",
"numpy.isclose",
"Florence.MaterialLibrary.LLDispatch._NeoHookean_.KineticMeasures",
"numpy.dot",
"Florence.Tensor.Voigt",
"Florence.Tensor.trace",
"numpy.sqrt"
] | [((1002, 1026), 'Florence.MaterialLibrary.LLDispatch._NeoHookean_.KineticMeasures', 'KineticMeasures', (['self', 'F'], {}), '(self, F)\n', (1017, 1026), False, 'from Florence.MaterialLibrary.LLDispatch._NeoHookean_ import KineticMeasures\n'), ((1425, 1436), 'numpy.trace', 'np.trace', (['b'], {}), '(b)\n', (1433, 1436), True, 'import numpy as np\n'), ((1796, 1813), 'Florence.Tensor.Voigt', 'Voigt', (['C_Voigt', '(1)'], {}), '(C_Voigt, 1)\n', (1801, 1813), False, 'from Florence.Tensor import trace, Voigt\n'), ((2286, 2297), 'numpy.trace', 'np.trace', (['b'], {}), '(b)\n', (2294, 2297), True, 'import numpy as np\n'), ((2740, 2754), 'numpy.dot', 'np.dot', (['F.T', 'F'], {}), '(F.T, F)\n', (2746, 2754), True, 'import numpy as np\n'), ((1188, 1204), 'numpy.isclose', 'np.isclose', (['J', '(0)'], {}), '(J, 0)\n', (1198, 1204), True, 'import numpy as np\n'), ((1235, 1264), 'numpy.sqrt', 'np.sqrt', (['(0.04 * J * J + 1e-08)'], {}), '(0.04 * J * J + 1e-08)\n', (1242, 1264), True, 'import numpy as np\n'), ((2089, 2105), 'numpy.isclose', 'np.isclose', (['J', '(0)'], {}), '(J, 0)\n', (2099, 2105), True, 'import numpy as np\n'), ((2136, 2165), 'numpy.sqrt', 'np.sqrt', (['(0.04 * J * J + 1e-08)'], {}), '(0.04 * J * J + 1e-08)\n', (2143, 2165), True, 'import numpy as np\n'), ((2766, 2782), 'numpy.isclose', 'np.isclose', (['J', '(0)'], {}), '(J, 0)\n', (2776, 2782), True, 'import numpy as np\n'), ((2813, 2842), 'numpy.sqrt', 'np.sqrt', (['(0.04 * J * J + 1e-08)'], {}), '(0.04 * J * J + 1e-08)\n', (2820, 2842), True, 'import numpy as np\n'), ((1591, 1615), 'numpy.einsum', 'np.einsum', (['"""ij,kl"""', 'b', 'b'], {}), "('ij,kl', b, b)\n", (1600, 1615), True, 'import numpy as np\n'), ((1653, 1677), 'numpy.einsum', 'np.einsum', (['"""ij,kl"""', 'I', 'I'], {}), "('ij,kl', I, I)\n", (1662, 1677), True, 'import numpy as np\n'), ((1723, 1747), 'numpy.einsum', 'np.einsum', (['"""ik,jl"""', 'I', 'I'], {}), "('ik,jl', I, I)\n", (1732, 1747), True, 'import numpy as np\n'), ((1751, 1775), 
'numpy.einsum', 'np.einsum', (['"""il,jk"""', 'I', 'I'], {}), "('il,jk', I, I)\n", (1760, 1775), True, 'import numpy as np\n'), ((2965, 2973), 'Florence.Tensor.trace', 'trace', (['C'], {}), '(C)\n', (2970, 2973), False, 'from Florence.Tensor import trace, Voigt\n'), ((2995, 3003), 'Florence.Tensor.trace', 'trace', (['C'], {}), '(C)\n', (3000, 3003), False, 'from Florence.Tensor import trace, Voigt\n')] |
import os
import numpy as np
import unittest
from tqdm import trange
from autograd import Tensor, SGD, NLLLoss, fetch_mnist
from autograd import Adam
import autograd.nn as nn
# Fixed seed so batch sampling (and therefore training) is reproducible.
np.random.seed(1)
# Name -> optimizer class registry used by the parametrized tests below.
__optimizers__ = {}
__optimizers__['sgd'] = SGD
__optimizers__['adam'] = Adam
class MNISTNet(nn.Module):
    """Two-layer MLP classifier for flattened 28x28 images (784 -> 128 -> 10),
    producing log-probabilities."""

    def __init__(self):
        layers = [
            nn.Dense(784, 128),
            nn.ReLU(),
            nn.Dense(128, 10),
            nn.LogSoftmax(),
        ]
        self.affines = nn.Sequential(*layers)

    def forward(self, x):
        """Return per-class log-probabilities for input batch `x`."""
        return self.affines(x)
class TestMNISTdigits(unittest.TestCase):
    """End-to-end training sanity check on MNIST digits, once per optimizer."""

    def test(self):
        def _run(optimizer_name):
            model = MNISTNet()
            criterion = NLLLoss()
            optimizer = __optimizers__[optimizer_name](
                model.parameters(), lr=1e-3)

            X_train, Y_train, X_test, Y_test = fetch_mnist()
            X_train = X_train.reshape((-1, 784))
            X_test = X_test.reshape((-1, 784))

            epochs, batch_size = 300, 128
            # Hide the progress bar on CI to keep logs clean.
            hide_bar = os.getenv('CI') is not None
            for _ in (t := trange(epochs, disable=hide_bar)):
                indices = np.random.randint(
                    0, X_train.shape[0], size=batch_size)
                samples = Tensor(X_train[indices])
                targets = Y_train[indices]

                logits = model(samples)
                loss = criterion(logits, targets)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                preds = np.argmax(logits.data, axis=-1)
                acc = (preds == targets).mean()
                t.set_description(
                    f'loss {loss.data[0]:.2f} accuracy {acc:.2f}')

            test_out = model(Tensor(X_test)).data
            Y_test_preds = np.argmax(test_out, axis=-1)
            acc = (Y_test_preds == Y_test).mean()
            assert acc >= 0.9
            print(f'optimizer {optimizer_name} got {100*acc:.1f} % acc')

        _run('sgd')
        _run('adam')
class TestMNISTfashion(unittest.TestCase):
    # NOTE(review): duplicates TestMNISTdigits except for the dataset name and
    # the accuracy threshold; a shared helper would remove the duplication.
    def test(self):
        def _test(optimiz):
            model = MNISTNet()
            criterion = NLLLoss()
            optimizer = __optimizers__[optimiz](model.parameters(), lr=1e-3)
            # Fashion-MNIST: same shapes and label count as digits.
            X_train, Y_train, X_test, Y_test = fetch_mnist('fashion')
            X_train = X_train.reshape((-1, 784))
            X_test = X_test.reshape((-1, 784))
            epochs = 300
            batch_size = 128
            # Progress bar disabled on CI to keep logs clean.
            for _ in (t := trange(epochs, disable=os.getenv('CI') is not None)):
                indices = np.random.randint(0, X_train.shape[0], size=(batch_size))
                samples = Tensor(X_train[indices])
                targets = Y_train[indices]
                logits = model(samples)
                loss = criterion(logits, targets)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                preds = np.argmax(logits.data, axis=-1)
                acc = (preds == targets).mean()
                t.set_description(
                    f'loss {loss.data[0]:.2f} accuracy {acc:.2f}')
            Y_test_preds_out = model(Tensor(X_test)).data
            Y_test_preds = np.argmax(Y_test_preds_out, axis=-1)
            acc = (Y_test_preds == Y_test).mean()
            # Lower bar than digits: fashion is the harder task.
            assert acc >= 0.75
            print(f'optimizer {optimiz} got {100*acc:.1f} % acc')
        _test('sgd')
        _test('adam')
| [
"autograd.NLLLoss",
"autograd.nn.LogSoftmax",
"autograd.Tensor",
"numpy.random.seed",
"numpy.argmax",
"autograd.fetch_mnist",
"autograd.nn.ReLU",
"numpy.random.randint",
"os.getenv",
"autograd.nn.Dense"
] | [((175, 192), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (189, 192), True, 'import numpy as np\n'), ((380, 398), 'autograd.nn.Dense', 'nn.Dense', (['(784)', '(128)'], {}), '(784, 128)\n', (388, 398), True, 'import autograd.nn as nn\n'), ((416, 425), 'autograd.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (423, 425), True, 'import autograd.nn as nn\n'), ((443, 460), 'autograd.nn.Dense', 'nn.Dense', (['(128)', '(10)'], {}), '(128, 10)\n', (451, 460), True, 'import autograd.nn as nn\n'), ((478, 493), 'autograd.nn.LogSoftmax', 'nn.LogSoftmax', ([], {}), '()\n', (491, 493), True, 'import autograd.nn as nn\n'), ((720, 729), 'autograd.NLLLoss', 'NLLLoss', ([], {}), '()\n', (727, 729), False, 'from autograd import Tensor, SGD, NLLLoss, fetch_mnist\n'), ((854, 867), 'autograd.fetch_mnist', 'fetch_mnist', ([], {}), '()\n', (865, 867), False, 'from autograd import Tensor, SGD, NLLLoss, fetch_mnist\n'), ((1780, 1816), 'numpy.argmax', 'np.argmax', (['Y_test_preds_out'], {'axis': '(-1)'}), '(Y_test_preds_out, axis=-1)\n', (1789, 1816), True, 'import numpy as np\n'), ((2156, 2165), 'autograd.NLLLoss', 'NLLLoss', ([], {}), '()\n', (2163, 2165), False, 'from autograd import Tensor, SGD, NLLLoss, fetch_mnist\n'), ((2290, 2312), 'autograd.fetch_mnist', 'fetch_mnist', (['"""fashion"""'], {}), "('fashion')\n", (2301, 2312), False, 'from autograd import Tensor, SGD, NLLLoss, fetch_mnist\n'), ((3225, 3261), 'numpy.argmax', 'np.argmax', (['Y_test_preds_out'], {'axis': '(-1)'}), '(Y_test_preds_out, axis=-1)\n', (3234, 3261), True, 'import numpy as np\n'), ((1126, 1181), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]'], {'size': 'batch_size'}), '(0, X_train.shape[0], size=batch_size)\n', (1143, 1181), True, 'import numpy as np\n'), ((1210, 1234), 'autograd.Tensor', 'Tensor', (['X_train[indices]'], {}), '(X_train[indices])\n', (1216, 1234), False, 'from autograd import Tensor, SGD, NLLLoss, fetch_mnist\n'), ((1514, 1545), 'numpy.argmax', 'np.argmax', 
(['logits.data'], {'axis': '(-1)'}), '(logits.data, axis=-1)\n', (1523, 1545), True, 'import numpy as np\n'), ((2571, 2626), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]'], {'size': 'batch_size'}), '(0, X_train.shape[0], size=batch_size)\n', (2588, 2626), True, 'import numpy as np\n'), ((2655, 2679), 'autograd.Tensor', 'Tensor', (['X_train[indices]'], {}), '(X_train[indices])\n', (2661, 2679), False, 'from autograd import Tensor, SGD, NLLLoss, fetch_mnist\n'), ((2959, 2990), 'numpy.argmax', 'np.argmax', (['logits.data'], {'axis': '(-1)'}), '(logits.data, axis=-1)\n', (2968, 2990), True, 'import numpy as np\n'), ((1732, 1746), 'autograd.Tensor', 'Tensor', (['X_test'], {}), '(X_test)\n', (1738, 1746), False, 'from autograd import Tensor, SGD, NLLLoss, fetch_mnist\n'), ((3177, 3191), 'autograd.Tensor', 'Tensor', (['X_test'], {}), '(X_test)\n', (3183, 3191), False, 'from autograd import Tensor, SGD, NLLLoss, fetch_mnist\n'), ((1069, 1084), 'os.getenv', 'os.getenv', (['"""CI"""'], {}), "('CI')\n", (1078, 1084), False, 'import os\n'), ((2514, 2529), 'os.getenv', 'os.getenv', (['"""CI"""'], {}), "('CI')\n", (2523, 2529), False, 'import os\n')] |
import warnings
# Globally silence warnings emitted during the heavy imports below.
warnings.filterwarnings("ignore")
import os
import re
import numpy as np
import scipy.io as io
from util import strs
from dataset.data_util import pil_load_img
from dataset.dataload import TextDataset, TextInstance
import cv2
from util import io as libio
class TotalText(TextDataset):
def __init__(self, data_root, k, ignore_list=None, is_training=True, transform=None):
super().__init__(transform, is_training)
self.data_root = data_root
self.k = k
self.is_training = is_training
if ignore_list:
with open(ignore_list) as f:
ignore_list = f.readlines()
ignore_list = [line.strip() for line in ignore_list]
else:
ignore_list = []
self.image_root = os.path.join(data_root, 'Images', 'Train' if is_training else 'Test')
self.annotation_root = os.path.join(data_root, 'gt', 'Train' if is_training else 'Test')
self.image_list = os.listdir(self.image_root)
self.image_list = list(filter(lambda img: img.replace('.jpg', '') not in ignore_list, self.image_list))
self.annotation_list = ['poly_gt_{}'.format(img_name.replace('.jpg', '')) for img_name in self.image_list]
@staticmethod
def parse_mat(mat_path):
"""
.mat file parser
:param mat_path: (str), mat file path
:return: (list), TextInstance
"""
annot = io.loadmat(mat_path + ".mat")
polygons = []
for cell in annot['polygt']:
x = cell[1][0]
y = cell[3][0]
text = cell[4][0] if len(cell[4]) > 0 else '#'
ori = cell[5][0] if len(cell[5]) > 0 else 'c'
if len(x) < 4: # too few points
continue
pts = np.stack([x, y]).T.astype(np.int32)
polygons.append(TextInstance(pts, ori, text))
return polygons
@staticmethod
def parse_carve_txt(gt_path):
"""
.mat file parser
:param gt_path: (str), mat file path
:return: (list), TextInstance
"""
lines = libio.read_lines(gt_path + ".txt")
polygons = []
for line in lines:
line = strs.remove_all(line, '\xef\xbb\xbf')
gt = line.split(',')
xx = gt[0].replace("x: ", "").replace("[[", "").replace("]]", "").lstrip().rstrip()
yy = gt[1].replace("y: ", "").replace("[[", "").replace("]]", "").lstrip().rstrip()
try:
xx = [int(x) for x in re.split(r" *", xx)]
yy = [int(y) for y in re.split(r" *", yy)]
except:
xx = [int(x) for x in re.split(r" +", xx)]
yy = [int(y) for y in re.split(r" +", yy)]
if len(xx) < 4 or len(yy) < 4: # too few points
continue
text = gt[-1].split('\'')[1]
try:
ori = gt[-2].split('\'')[1]
except:
ori = 'c'
pts = np.stack([xx, yy]).T.astype(np.int32)
polygons.append(TextInstance(pts, ori, text))
# print(polygon)
return polygons
def __getitem__(self, item):
image_id = self.image_list[item]
image_path = os.path.join(self.image_root, image_id)
# Read image data
image = pil_load_img(image_path)
# Read annotation
annotation_id = self.annotation_list[item]
annotation_path = os.path.join(self.annotation_root, annotation_id)
polygons = self.parse_mat(annotation_path)
# polygons = self.parse_carve_txt(annotation_path)
return self.get_training_data(image, polygons, self.k, image_id=image_id, image_path=image_path)
    def __len__(self):
        """Number of images (and hence samples) in the dataset."""
        return len(self.image_list)
| [
"numpy.stack",
"re.split",
"dataset.data_util.pil_load_img",
"scipy.io.loadmat",
"warnings.filterwarnings",
"util.strs.remove_all",
"dataset.dataload.TextInstance",
"os.path.join",
"os.listdir",
"util.io.read_lines"
] | [((16, 49), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (39, 49), False, 'import warnings\n'), ((785, 854), 'os.path.join', 'os.path.join', (['data_root', '"""Images"""', "('Train' if is_training else 'Test')"], {}), "(data_root, 'Images', 'Train' if is_training else 'Test')\n", (797, 854), False, 'import os\n'), ((886, 951), 'os.path.join', 'os.path.join', (['data_root', '"""gt"""', "('Train' if is_training else 'Test')"], {}), "(data_root, 'gt', 'Train' if is_training else 'Test')\n", (898, 951), False, 'import os\n'), ((978, 1005), 'os.listdir', 'os.listdir', (['self.image_root'], {}), '(self.image_root)\n', (988, 1005), False, 'import os\n'), ((1430, 1459), 'scipy.io.loadmat', 'io.loadmat', (["(mat_path + '.mat')"], {}), "(mat_path + '.mat')\n", (1440, 1459), True, 'import scipy.io as io\n'), ((2099, 2133), 'util.io.read_lines', 'libio.read_lines', (["(gt_path + '.txt')"], {}), "(gt_path + '.txt')\n", (2115, 2133), True, 'from util import io as libio\n'), ((3232, 3271), 'os.path.join', 'os.path.join', (['self.image_root', 'image_id'], {}), '(self.image_root, image_id)\n', (3244, 3271), False, 'import os\n'), ((3315, 3339), 'dataset.data_util.pil_load_img', 'pil_load_img', (['image_path'], {}), '(image_path)\n', (3327, 3339), False, 'from dataset.data_util import pil_load_img\n'), ((3444, 3493), 'os.path.join', 'os.path.join', (['self.annotation_root', 'annotation_id'], {}), '(self.annotation_root, annotation_id)\n', (3456, 3493), False, 'import os\n'), ((2202, 2230), 'util.strs.remove_all', 'strs.remove_all', (['line', '""""""'], {}), "(line, '')\n", (2217, 2230), False, 'from util import strs\n'), ((1843, 1871), 'dataset.dataload.TextInstance', 'TextInstance', (['pts', 'ori', 'text'], {}), '(pts, ori, text)\n', (1855, 1871), False, 'from dataset.dataload import TextDataset, TextInstance\n'), ((3056, 3084), 'dataset.dataload.TextInstance', 'TextInstance', (['pts', 'ori', 'text'], {}), '(pts, ori, text)\n', 
(3068, 3084), False, 'from dataset.dataload import TextDataset, TextInstance\n'), ((1779, 1795), 'numpy.stack', 'np.stack', (['[x, y]'], {}), '([x, y])\n', (1787, 1795), True, 'import numpy as np\n'), ((2520, 2538), 're.split', 're.split', (['""" *"""', 'xx'], {}), "(' *', xx)\n", (2528, 2538), False, 'import re\n'), ((2579, 2597), 're.split', 're.split', (['""" *"""', 'yy'], {}), "(' *', yy)\n", (2587, 2597), False, 'import re\n'), ((2990, 3008), 'numpy.stack', 'np.stack', (['[xx, yy]'], {}), '([xx, yy])\n', (2998, 3008), True, 'import numpy as np\n'), ((2658, 2676), 're.split', 're.split', (['""" +"""', 'xx'], {}), "(' +', xx)\n", (2666, 2676), False, 'import re\n'), ((2717, 2735), 're.split', 're.split', (['""" +"""', 'yy'], {}), "(' +', yy)\n", (2725, 2735), False, 'import re\n')] |
'''
Be careful about action_spec defined here
'''
from collections import OrderedDict
import numpy as np
from grasp.envs import MujocoEnv
from grasp.models.robots import Sawyer
from grasp.utils import transform_utils as T
from grasp.models.grippers import gripper_factory
from termcolor import colored
class SawyerEnv(MujocoEnv):
    """
    Base MuJoCo environment for a Sawyer robot with an optional gripper.

    Wires up joint/gripper/actuator references, joint-space observations and
    coordinate transforms into the robot base frame.
    """

    def __init__(
        self,
        has_renderer=False,
        has_offscreen_renderer=False,
        render_collision_mesh=True,
        render_visual_mesh=True,
        control_freq=10,
        horizon=1000,
        ignore_done=False,
        use_camera_obs=False,
        camera_name="frontview",
        camera_height=256,
        camera_width=256,
        camera_depth=False,
        use_indicator_object=False,
        gripper_type=None,
        use_render=True,
        log_name='1',
        use_new_model='False',
        use_pro_new='False',
        to_train='False',
        is_human='False',
        train_pro=False,
        adv_init=False,
        random_perturb=False,
        use_pro_name='',
        use_new_name='',
        object_xml='',
        user_name='',
        seed=48,
        params=None,
        test_user=False,
    ):
        """
        Store the Sawyer-specific options and forward everything else to
        :class:`MujocoEnv` (see that class for the shared keyword arguments).
        """
        self.use_indicator_object = use_indicator_object
        self.gripper_type = gripper_type
        super().__init__(
            has_renderer=has_renderer,
            has_offscreen_renderer=has_offscreen_renderer,
            render_collision_mesh=render_collision_mesh,
            render_visual_mesh=render_visual_mesh,
            control_freq=control_freq,
            horizon=horizon,
            ignore_done=ignore_done,
            use_camera_obs=use_camera_obs,
            camera_name=camera_name,
            camera_height=camera_height,
            camera_width=camera_width,
            camera_depth=camera_depth,
            use_render=use_render,
            log_name=log_name,
            use_new_model=use_new_model,
            use_pro_new=use_pro_new,
            to_train=to_train,
            is_human=is_human,
            train_pro=train_pro,
            adv_init=adv_init,
            random_perturb=random_perturb,
            use_pro_name=use_pro_name,
            use_new_name=use_new_name,
            object_xml=object_xml,
            user_name=user_name,
            seed=seed,
            params=params,
            test_user=test_user,
        )

    def _load_model(self):
        """Load the Sawyer robot model and attach the configured gripper."""
        super()._load_model()
        self.mujoco_robot = Sawyer()
        self.gripper = gripper_factory(self.gripper_type)()

    def _reset_internal(self):
        super()._reset_internal()

    def _get_reference(self):
        """
        Cache qpos/qvel addresses and actuator ids for the robot joints, the
        optional position-indicator object and the gripper.
        """
        super()._get_reference()
        self.robot_joints = list(self.mujoco_robot.joints)
        self._ref_joint_pos_indexes = [
            self.sim.model.get_joint_qpos_addr(x) for x in self.robot_joints
        ]
        self._ref_joint_vel_indexes = [
            self.sim.model.get_joint_qvel_addr(x) for x in self.robot_joints
        ]
        if self.use_indicator_object:
            ind_qpos = self.sim.model.get_joint_qpos_addr('pos_indicator')
            self._ref_indicator_pos_low, self._ref_indicator_pos_high = ind_qpos
            # BUG FIX: was ``self.sim.model_get_joint_qvel_addr(...)`` — a
            # missing attribute-access dot, raising AttributeError whenever
            # use_indicator_object is enabled (compare the qpos line above).
            ind_qvel = self.sim.model.get_joint_qvel_addr('pos_indicator')
            self._ref_indicator_vel_low, self._ref_indicator_vel_high = ind_qvel
            self.indicator_id = self.sim.model.body_name2id('pos_indicator')
        # Actuator ids by naming convention; used in _pre_action.
        self._ref_joint_pos_actuator_indexes = [
            self.sim.model.actuator_name2id(actuator)
            for actuator in self.sim.model.actuator_names
            if actuator.startswith('pos')
        ]
        self._ref_joint_vel_actuator_indexes = [
            self.sim.model.actuator_name2id(actuator)
            for actuator in self.sim.model.actuator_names
            if actuator.startswith('vel')
        ]
        self.gripper_joints = list(self.mujoco_robot.gripper_joints)
        self._ref_gripper_joint_pos_indexes = [
            self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_joints
        ]
        self._ref_gripper_joint_vel_indexes = [
            self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_joints
        ]
        # to check
        # self.eef_site_id = self.sim.model.site_name2id('grip')

    # _pre_action is not used by step_IK
    def _pre_action(self, action):
        """
        Clip the normalized action to the action space and rescale it to the
        actuator control ranges before writing it into ``sim.data.ctrl``.
        """
        assert len(action) == self.dof, 'environment got invalid action dimension'
        low, high = self.action_spec
        action = np.clip(action, low, high)
        # rescale normalized action to control ranges
        ctrl_range = self.sim.model.actuator_ctrlrange
        bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])
        weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])
        applied_action = bias + weight * action
        self.sim.data.ctrl[:] = applied_action
        # todo
        # # gravity compensation
        # self.sim.data.qfrc_applied[
        #     self._ref_joint_vel_actuator_indexes
        # ] = self.sim.data.qfrc_bias[self._ref_joint_vel_indexes]

    def _post_action(self, action):
        ret = super()._post_action(action)
        return ret

    def _get_observation(self):
        """
        Extend the base observation with joint positions/velocities and a
        flattened 'robot-state' vector (sin/cos of positions + velocities).
        """
        di = super()._get_observation()
        di['joint_pos'] = np.array(
            [self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes]
        )
        di['joint_vel'] = np.array(
            [self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes]
        )
        robot_states = [
            np.sin(di['joint_pos']),
            np.cos(di['joint_pos']),
            di['joint_vel'],
        ]
        # Gripper qpos is intentionally excluded from 'robot-state' for now.
        di['robot-state'] = np.concatenate(robot_states)
        return di

    @property
    def action_spec(self):
        """
        Bounds of the action space: a 6-vector in [-1, 1] interpreted as a
        grasp point (2 values) plus a quaternion (4 values), not per-joint
        commands.
        """
        low = np.ones(6) * -1.
        high = np.ones(6) * 1.
        return low, high

    @property
    def dof(self):
        """Number of degrees of freedom of the robot."""
        dof = self.mujoco_robot.dof
        return dof

    @property
    def ref_joint_pos_indexes(self):
        return self._ref_joint_pos_indexes

    @property
    def ref_gripper_pos_indexes(self):
        return self._ref_gripper_joint_pos_indexes

    def pose_in_base_from_name(self, name):
        """
        Return the 4x4 homogeneous pose of body `name`, expressed in the
        robot base ("base" body) frame.
        """
        pos_in_world = self.sim.data.get_body_xpos(name)
        rot_in_world = self.sim.data.get_body_xmat(name).reshape((3, 3))
        pose_in_world = T.make_pose(pos_in_world, rot_in_world)
        base_pos_in_world = self.sim.data.get_body_xpos("base")
        base_rot_in_world = self.sim.data.get_body_xmat("base").reshape((3, 3))
        base_pose_in_world = T.make_pose(base_pos_in_world, base_rot_in_world)
        world_base_in_world = T.pose_inv(base_pose_in_world)
        pose_in_base = T.pose_in_A_to_pose_in_B(pose_in_world, world_base_in_world)
        return pose_in_base

    def set_robot_joint_positions(self, pos):
        """Directly set the robot joint positions and recompute kinematics."""
        self.sim.data.qpos[self._ref_joint_pos_indexes] = pos
        self.sim.forward()

    @property
    def _right_hand_joint_cartesian_pose(self):
        # NOTE(review): the body is named "left_gripper_base" although these
        # properties say "right hand" — confirm against the model XML.
        return self.pose_in_base_from_name("left_gripper_base")

    @property
    def _right_hand_pose(self):
        return self.pose_in_base_from_name("left_gripper_base")

    @property
    def _right_hand_quat(self):
        """Orientation of the end effector as a quaternion, in base frame."""
        return T.mat2quat(self._right_hand_orn)

    @property
    def _right_hand_pos(self):
        """
        Returns position of eef in base frame of robot.
        """
        eef_pose_in_base = self._right_hand_pose
        return eef_pose_in_base[:3, 3]

    @property
    def _right_hand_orn(self):
        """
        Returns orientation of eef in base frame of robot as a rotation matrix.
        """
        eef_pose_in_base = self._right_hand_pose
        return eef_pose_in_base[:3, :3]

    @property
    def _joint_positions(self):
        """
        Returns a numpy array of joint positions.
        Sawyer robots have 7 joints and positions are in rotation angles.
        """
        return self.sim.data.qpos[self._ref_joint_pos_indexes]

    @property
    def _joint_velocities(self):
        """
        Returns a numpy array of joint velocities.
        Sawyer robots have 7 joints and velocities are angular velocities.
        """
        return self.sim.data.qvel[self._ref_joint_vel_indexes]
| [
"grasp.utils.transform_utils.make_pose",
"grasp.models.grippers.gripper_factory",
"grasp.models.robots.Sawyer",
"numpy.ones",
"numpy.clip",
"grasp.utils.transform_utils.pose_inv",
"grasp.utils.transform_utils.pose_in_A_to_pose_in_B",
"numpy.array",
"grasp.utils.transform_utils.mat2quat",
"numpy.si... | [((2658, 2666), 'grasp.models.robots.Sawyer', 'Sawyer', ([], {}), '()\n', (2664, 2666), False, 'from grasp.models.robots import Sawyer\n'), ((4753, 4779), 'numpy.clip', 'np.clip', (['action', 'low', 'high'], {}), '(action, low, high)\n', (4760, 4779), True, 'import numpy as np\n'), ((5517, 5587), 'numpy.array', 'np.array', (['[self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes]'], {}), '([self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes])\n', (5525, 5587), True, 'import numpy as np\n'), ((5637, 5707), 'numpy.array', 'np.array', (['[self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes]'], {}), '([self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes])\n', (5645, 5707), True, 'import numpy as np\n'), ((6161, 6189), 'numpy.concatenate', 'np.concatenate', (['robot_states'], {}), '(robot_states)\n', (6175, 6189), True, 'import numpy as np\n'), ((7065, 7104), 'grasp.utils.transform_utils.make_pose', 'T.make_pose', (['pos_in_world', 'rot_in_world'], {}), '(pos_in_world, rot_in_world)\n', (7076, 7104), True, 'from grasp.utils import transform_utils as T\n'), ((7278, 7327), 'grasp.utils.transform_utils.make_pose', 'T.make_pose', (['base_pos_in_world', 'base_rot_in_world'], {}), '(base_pos_in_world, base_rot_in_world)\n', (7289, 7327), True, 'from grasp.utils import transform_utils as T\n'), ((7358, 7388), 'grasp.utils.transform_utils.pose_inv', 'T.pose_inv', (['base_pose_in_world'], {}), '(base_pose_in_world)\n', (7368, 7388), True, 'from grasp.utils import transform_utils as T\n'), ((7413, 7473), 'grasp.utils.transform_utils.pose_in_A_to_pose_in_B', 'T.pose_in_A_to_pose_in_B', (['pose_in_world', 'world_base_in_world'], {}), '(pose_in_world, world_base_in_world)\n', (7437, 7473), True, 'from grasp.utils import transform_utils as T\n'), ((7945, 7977), 'grasp.utils.transform_utils.mat2quat', 'T.mat2quat', (['self._right_hand_orn'], {}), '(self._right_hand_orn)\n', (7955, 7977), True, 'from grasp.utils import transform_utils as 
T\n'), ((2706, 2740), 'grasp.models.grippers.gripper_factory', 'gripper_factory', (['self.gripper_type'], {}), '(self.gripper_type)\n', (2721, 2740), False, 'from grasp.models.grippers import gripper_factory\n'), ((5767, 5790), 'numpy.sin', 'np.sin', (["di['joint_pos']"], {}), "(di['joint_pos'])\n", (5773, 5790), True, 'import numpy as np\n'), ((5804, 5827), 'numpy.cos', 'np.cos', (["di['joint_pos']"], {}), "(di['joint_pos'])\n", (5810, 5827), True, 'import numpy as np\n'), ((6494, 6504), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (6501, 6504), True, 'import numpy as np\n'), ((6526, 6536), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (6533, 6536), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import logging
import random
import sys
import time
from functools import partial
import os
import numpy as np
from utils.evaluation import eval_with_specific_model
from utils.loader import prepare_datasets
from toolkit.joint_ner_and_md_model import MainTaggerModel
from utils import models_path, eval_script, eval_logs_dir, read_parameters_from_sys_argv
from utils.dynetsaver import DynetSaver
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("main")
def train(sys_argv):
    """
    Train the joint NER / morphological-disambiguation tagger.

    Reads options from `sys_argv`, builds (or reloads) a MainTaggerModel,
    then runs up to ``opts.maximum_epochs`` epochs of minibatch training
    with per-epoch dev/test evaluation and simple early stopping.
    """
    # Read parameters from command line (skipping the program name, and the two others, i.e. --command train.
    opts, parameters = read_parameters_from_sys_argv(sys_argv)
    # Check evaluation script / folders
    if not os.path.isfile(eval_script):
        raise Exception('CoNLL evaluation script not found at "%s"' % eval_script)
    # Reload
    if opts.model_epoch_path:
        # Resume: parameters come from the stored model, not the command line.
        model = MainTaggerModel(models_path=models_path,
                                model_path=opts.model_path,
                                model_epoch_dir_path=opts.model_epoch_path,
                                overwrite_mappings=opts.overwrite_mappings)
        parameters = model.parameters
    else:
        # Initialize model
        model = MainTaggerModel(opts=opts,
                                parameters=parameters,
                                models_path=models_path, overwrite_mappings=opts.overwrite_mappings)
    print("MainTaggerModel location: {}".format(model.model_path))
    # Prepare the data
    # dev_data, _, \
    # id_to_tag, tag_scheme, test_data, \
    # train_data, train_stats, word_to_id, \
    # yuret_test_data, yuret_train_data = prepare_datasets(model, opts, parameters)
    data_dict, id_to_tag, word_to_id, stats_dict, id_to_char, id_to_morpho_tag = prepare_datasets(model, opts, parameters)
    batch_size = opts.batch_size
    # Build the model
    model.build(training=True, **parameters)
    if opts.reload == 1 and opts.model_epoch_path:
        print("Resuming from %s" % os.path.join(models_path, opts.model_path, opts.model_epoch_path))
        model.reload(os.path.join(models_path, opts.model_path, opts.model_epoch_path))
    ### At this point, the training data is encoded in our format.
    #
    # Train network
    #
    starting_epoch_no = opts.starting_epoch_no
    maximum_epoch_no = opts.maximum_epochs # number of epochs over the training set
    tracked_epoch_window_width = 10
    last_epoch_with_best_scores = 1
    last_N_epochs_avg_loss_values = [0] * tracked_epoch_window_width
    best_dev = -np.inf
    best_test = -np.inf
    if model.parameters['active_models'] in [1, 2, 3]:
        # Track morphological-disambiguation scores only when MD is active.
        best_morph_dev = -np.inf
        best_morph_test = -np.inf
    model.trainer.set_clip_threshold(5.0)
    def update_loss(sentences_in_the_batch, loss_function):
        # One optimizer step over a minibatch; returns the batch loss value.
        loss = loss_function(sentences_in_the_batch)
        loss.backward()
        model.trainer.update()
        if loss.value() / batch_size >= (10000000000.0 - 1):
            # Guard against exploding losses; only logged, not handled.
            logging.error("BEEP")
        return loss.value()
    for epoch_no in range(starting_epoch_no, maximum_epoch_no+1):
        start_time = time.time()
        epoch_costs = []
        print("Starting epoch {}...".format(epoch_no))
        n_samples_trained = 0
        loss_configuration_parameters = {}
        train_data = []
        # Joint training: concatenate NER and MD training portions.
        for label in ["ner", "md"]:
            for purpose in ["train"]:
                train_data += data_dict[label][purpose]
        shuffled_data = list(train_data)
        random.shuffle(shuffled_data)
        index = 0
        while index < len(shuffled_data):
            batch_data = shuffled_data[index:(index + batch_size)]
            epoch_costs += [update_loss(batch_data,
                                        loss_function=partial(model.get_loss,
                                                              loss_configuration_parameters=loss_configuration_parameters))]
            n_samples_trained += batch_size
            index += batch_size
            if n_samples_trained % 50 == 0 and n_samples_trained != 0:
                # Progress marker: mean loss over the last 50 recorded batches.
                sys.stdout.write("%s%f " % ("G", np.mean(epoch_costs[-50:])))
                sys.stdout.flush()
                if np.mean(epoch_costs[-50:]) > 100:
                    logging.error("BEEP")
        print("")
        print("Epoch {epoch_no} Avg. loss over training set: {epoch_loss_mean}".format(epoch_no=epoch_no,
                                                                                       epoch_loss_mean=np.mean(epoch_costs)))
        model.trainer.status()
        # Sliding window of the last N epoch-average losses (tracked only).
        last_N_epochs_avg_loss_values = last_N_epochs_avg_loss_values[1:] + [np.mean(epoch_costs)]
        # datasets_to_be_tested = {"ner": {"dev": data_dict["ner"]["dev"], "test": data_dict["ner"]["test"]},
        #                          "md": {"dev": data_dict["md"]["dev"], "test": data_dict["md"]["test"]}}
        datasets_to_be_tested = {label: {purpose: data_dict[label][purpose]
                                         for purpose in ["dev", "test"] if purpose in data_dict[label]}
                                 for label in ["ner", "md"]}
        f_scores, morph_accuracies, _, test_metrics = eval_with_specific_model(model,
                                                                               epoch_no,
                                                                               datasets_to_be_tested,
                                                                               return_datasets_with_predicted_labels=False)
        metrics_by_type = test_metrics[1]
        if model.parameters['active_models'] in [0, 2, 3]:
            if "dev" in f_scores["ner"]:
                if best_dev < f_scores["ner"]["dev"]:
                    # New best NER dev score: report, remember and checkpoint.
                    print("NER Epoch: %d New best dev score => best_dev, best_test: %lf %lf" % (epoch_no,
                                                                                                f_scores["ner"]["dev"],
                                                                                                f_scores["ner"]["test"]))
                    print("NER Epoch: %d |" % epoch_no + "|".join(["%s: %2.3lf" % (entity_type, m.fscore)
                                                                   for entity_type, m in sorted(metrics_by_type.items(), key=lambda x: x[0])]))
                    last_epoch_with_best_scores = epoch_no
                    best_dev = f_scores["ner"]["dev"]
                    best_test = f_scores["ner"]["test"]
                    model.save(epoch_no)
                    model.save_best_performances_and_costs(epoch_no,
                                                           best_performances=[f_scores["ner"]["dev"], f_scores["ner"]["test"]],
                                                           epoch_costs=epoch_costs)
                    model_epoch_dir_path = "model-epoch-%08d" % epoch_no
                    print("LOG: model_epoch_dir_path: {}".format(model_epoch_dir_path))
                else:
                    print("NER Epoch: %d Best dev and accompanying test score, best_dev, best_test: %lf %lf" % (epoch_no,
                                                                                                                best_dev,
                                                                                                                best_test))
                    print("NER Epoch: %d |" % epoch_no + "|".join(["%s: %2.3lf" % (entity_type, m.fscore)
                                                                   for entity_type, m in
                                                                   sorted(metrics_by_type.items(), key=lambda x: x[0])]))
        if model.parameters['active_models'] in [1, 2, 3]:
            if "dev" in morph_accuracies["md"]:
                if best_morph_dev < morph_accuracies["md"]["dev"]:
                    print("MORPH Epoch: %d New best dev score => best_dev, best_test: %lf %lf" %
                          (epoch_no, morph_accuracies["md"]["dev"], morph_accuracies["md"]["test"]))
                    best_morph_dev = morph_accuracies["md"]["dev"]
                    best_morph_test = morph_accuracies["md"]["test"]
                else:
                    print("MORPH Epoch: %d Best dev and accompanying test score, best_dev, best_test: %lf %lf"
                          % (epoch_no, best_morph_dev, best_morph_test))
        print("Epoch {} done. Average cost: {}".format(epoch_no, np.mean(epoch_costs)))
        print("MainTaggerModel dir: {}".format(model.model_path))
        print("Training took {} seconds for this epoch".format(time.time()-start_time))
        # Early stopping: give up when no new best score for 10 epochs.
        # ("peoch" typo below is preserved — it is a runtime message.)
        if epoch_no-last_epoch_with_best_scores == 0 or epoch_no < last_epoch_with_best_scores + 10:
            print("Continue to train as the last peoch with best scores was only %d epochs before" % (epoch_no-last_epoch_with_best_scores))
        else:
            print("Stop training as the last epoch with best scores was %d epochs before" % (epoch_no-last_epoch_with_best_scores))
            break
# Script entry point: forward the raw command line to train().
if __name__ == "__main__":
    train(sys.argv)
| [
"toolkit.joint_ner_and_md_model.MainTaggerModel",
"logging.error",
"functools.partial",
"logging.basicConfig",
"random.shuffle",
"utils.evaluation.eval_with_specific_model",
"time.time",
"os.path.isfile",
"numpy.mean",
"sys.stdout.flush",
"utils.loader.prepare_datasets",
"os.path.join",
"log... | [((426, 465), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (445, 465), False, 'import logging\n'), ((475, 500), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (492, 500), False, 'import logging\n'), ((658, 697), 'utils.read_parameters_from_sys_argv', 'read_parameters_from_sys_argv', (['sys_argv'], {}), '(sys_argv)\n', (687, 697), False, 'from utils import models_path, eval_script, eval_logs_dir, read_parameters_from_sys_argv\n'), ((1815, 1856), 'utils.loader.prepare_datasets', 'prepare_datasets', (['model', 'opts', 'parameters'], {}), '(model, opts, parameters)\n', (1831, 1856), False, 'from utils.loader import prepare_datasets\n'), ((750, 777), 'os.path.isfile', 'os.path.isfile', (['eval_script'], {}), '(eval_script)\n', (764, 777), False, 'import os\n'), ((922, 1087), 'toolkit.joint_ner_and_md_model.MainTaggerModel', 'MainTaggerModel', ([], {'models_path': 'models_path', 'model_path': 'opts.model_path', 'model_epoch_dir_path': 'opts.model_epoch_path', 'overwrite_mappings': 'opts.overwrite_mappings'}), '(models_path=models_path, model_path=opts.model_path,\n model_epoch_dir_path=opts.model_epoch_path, overwrite_mappings=opts.\n overwrite_mappings)\n', (937, 1087), False, 'from toolkit.joint_ner_and_md_model import MainTaggerModel\n'), ((1266, 1388), 'toolkit.joint_ner_and_md_model.MainTaggerModel', 'MainTaggerModel', ([], {'opts': 'opts', 'parameters': 'parameters', 'models_path': 'models_path', 'overwrite_mappings': 'opts.overwrite_mappings'}), '(opts=opts, parameters=parameters, models_path=models_path,\n overwrite_mappings=opts.overwrite_mappings)\n', (1281, 1388), False, 'from toolkit.joint_ner_and_md_model import MainTaggerModel\n'), ((3171, 3182), 'time.time', 'time.time', ([], {}), '()\n', (3180, 3182), False, 'import time\n'), ((3543, 3572), 'random.shuffle', 'random.shuffle', (['shuffled_data'], {}), '(shuffled_data)\n', (3557, 3572), False, 'import 
random\n'), ((5186, 5299), 'utils.evaluation.eval_with_specific_model', 'eval_with_specific_model', (['model', 'epoch_no', 'datasets_to_be_tested'], {'return_datasets_with_predicted_labels': '(False)'}), '(model, epoch_no, datasets_to_be_tested,\n return_datasets_with_predicted_labels=False)\n', (5210, 5299), False, 'from utils.evaluation import eval_with_specific_model\n'), ((2133, 2198), 'os.path.join', 'os.path.join', (['models_path', 'opts.model_path', 'opts.model_epoch_path'], {}), '(models_path, opts.model_path, opts.model_epoch_path)\n', (2145, 2198), False, 'import os\n'), ((3032, 3053), 'logging.error', 'logging.error', (['"""BEEP"""'], {}), "('BEEP')\n", (3045, 3053), False, 'import logging\n'), ((2045, 2110), 'os.path.join', 'os.path.join', (['models_path', 'opts.model_path', 'opts.model_epoch_path'], {}), '(models_path, opts.model_path, opts.model_epoch_path)\n', (2057, 2110), False, 'import os\n'), ((4174, 4192), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4190, 4192), False, 'import sys\n'), ((4649, 4669), 'numpy.mean', 'np.mean', (['epoch_costs'], {}), '(epoch_costs)\n', (4656, 4669), True, 'import numpy as np\n'), ((8397, 8417), 'numpy.mean', 'np.mean', (['epoch_costs'], {}), '(epoch_costs)\n', (8404, 8417), True, 'import numpy as np\n'), ((4212, 4238), 'numpy.mean', 'np.mean', (['epoch_costs[-50:]'], {}), '(epoch_costs[-50:])\n', (4219, 4238), True, 'import numpy as np\n'), ((4266, 4287), 'logging.error', 'logging.error', (['"""BEEP"""'], {}), "('BEEP')\n", (4279, 4287), False, 'import logging\n'), ((4516, 4536), 'numpy.mean', 'np.mean', (['epoch_costs'], {}), '(epoch_costs)\n', (4523, 4536), True, 'import numpy as np\n'), ((8549, 8560), 'time.time', 'time.time', ([], {}), '()\n', (8558, 8560), False, 'import time\n'), ((3795, 3884), 'functools.partial', 'partial', (['model.get_loss'], {'loss_configuration_parameters': 'loss_configuration_parameters'}), '(model.get_loss, loss_configuration_parameters=\n 
loss_configuration_parameters)\n', (3802, 3884), False, 'from functools import partial\n'), ((4129, 4155), 'numpy.mean', 'np.mean', (['epoch_costs[-50:]'], {}), '(epoch_costs[-50:])\n', (4136, 4155), True, 'import numpy as np\n')] |
from meta_mb.utils.serializable import Serializable
import numpy as np
from gym.spaces import Box
class ImgWrapperEnv(Serializable):
    """
    Observation wrapper that replaces the wrapped env's observations with a
    stack of rendered camera frames (the channels of the last `time_steps`
    frames concatenated), optionally encoded to a latent vector by a VAE.
    """

    def __init__(self, env, vae=None,
                 use_img=True, img_size=(64, 64, 3),
                 latent_dim=None, time_steps=4):
        """
        Args:
            env: environment to wrap.
            vae: optional encoder with an ``encode`` method; when given,
                observations are latent vectors of length `latent_dim`.
            use_img (bool): stored as configuration; asserted when a
                latent space is used.
            img_size (tuple): (height, width, channels) of rendered frames.
            latent_dim (int): size of the VAE latent space (required when
                `vae` is given).
            time_steps (int): number of consecutive frames stacked along
                the channel axis.
        """
        Serializable.quick_init(self, locals())
        assert len(img_size) == 3
        self._wrapped_env = env
        self._vae = vae
        self._use_img = use_img
        self._img_size = img_size
        self._num_chan = img_size[-1]
        self._latent_dim = latent_dim
        self._time_steps = time_steps

    def step(self, action):
        """
        Step the wrapped env, render a new frame, shift the frame stack and
        return the stacked (or VAE-encoded) observation.
        """
        _, reward, done, info = self._wrapped_env.step(action)
        obs = self.render('rgb_array', width=self._img_size[0], height=self._img_size[1]) / 255.
        # Shift older frames towards the back of the channel stack and put
        # the newest frame in front.
        self._obs[:, :, self._num_chan:] = self._obs[:, :, :-self._num_chan]
        self._obs[:, :, :self._num_chan] = obs
        if self._vae is not None:
            obs = self._vae.encode(self._obs).reshape((self._latent_dim,))
        else:
            obs = self._obs
        return obs, reward, done, info

    def reset(self):
        """Reset the wrapped env and re-initialize the frame stack."""
        _ = self._wrapped_env.reset()
        self._obs = np.zeros(self._img_size[:-1] + (self._num_chan * self._time_steps,))
        obs = self.render('rgb_array', width=self._img_size[0], height=self._img_size[1]) / 255.
        self._obs[:, :, :self._num_chan] = obs
        if self._vae is not None:
            obs = self._vae.encode(self._obs).reshape((self._latent_dim,))
        else:
            obs = self._obs
        return obs

    @property
    def observation_space(self):
        """Box matching the latent vector or the stacked-frame observation."""
        if self._latent_dim is not None:
            assert self._use_img
            return Box(-1e6 * np.ones((self._latent_dim,)),
                       1e6 * np.ones((self._latent_dim,)), dtype=np.float32)
        # BUG FIX: the original referenced the non-existent attribute
        # ``self._n_channels`` (AttributeError) with a shape that did not
        # match the observation; the stacked-frame observation built in
        # reset()/step() has the shape below.
        obs_shape = self._img_size[:-1] + (self._num_chan * self._time_steps,)
        return Box(-1e6 * np.ones(obs_shape),
                   1e6 * np.ones(obs_shape),
                   dtype=np.float32)

    def __getattr__(self, attr):
        """
        If normalized env does not have the attribute then call the attribute in the wrapped_env
        Args:
            attr: attribute to get
        Returns:
            attribute of the wrapped_env
        """
        if hasattr(self._wrapped_env, '_wrapped_env'):
            orig_attr = self._wrapped_env.__getattr__(attr)
        else:
            orig_attr = self._wrapped_env.__getattribute__(attr)
        if callable(orig_attr):
            def hooked(*args, **kwargs):
                result = orig_attr(*args, **kwargs)
                return result
            return hooked
        else:
            return orig_attr
# Lowercase alias kept for callers that use the factory-style name.
image_wrapper = ImgWrapperEnv
"numpy.zeros",
"numpy.ones"
] | [((1180, 1248), 'numpy.zeros', 'np.zeros', (['(self._img_size[:-1] + (self._num_chan * self._time_steps,))'], {}), '(self._img_size[:-1] + (self._num_chan * self._time_steps,))\n', (1188, 1248), True, 'import numpy as np\n'), ((1851, 1896), 'numpy.ones', 'np.ones', (['(self._img_size + (self._n_channels,))'], {}), '(self._img_size + (self._n_channels,))\n', (1858, 1896), True, 'import numpy as np\n'), ((1923, 1968), 'numpy.ones', 'np.ones', (['(self._img_size + (self._n_channels,))'], {}), '(self._img_size + (self._n_channels,))\n', (1930, 1968), True, 'import numpy as np\n'), ((1717, 1745), 'numpy.ones', 'np.ones', (['(self._latent_dim,)'], {}), '((self._latent_dim,))\n', (1724, 1745), True, 'import numpy as np\n'), ((1776, 1804), 'numpy.ones', 'np.ones', (['(self._latent_dim,)'], {}), '((self._latent_dim,))\n', (1783, 1804), True, 'import numpy as np\n')] |
"""
Classes of variables for equations/terms.
"""
from __future__ import print_function
from __future__ import absolute_import
from collections import deque
import numpy as nm
from sfepy.base.base import (real_types, complex_types, assert_, get_default,
output, OneTypeList, Container, Struct, basestr,
iter_dict_of_lists)
from sfepy.base.timing import Timer
import sfepy.linalg as la
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import get_condition_value
from sfepy.discrete.integrals import Integral
from sfepy.discrete.common.dof_info import (DofInfo, EquationMap,
expand_nodes_to_equations,
is_active_bc)
from sfepy.discrete.fem.lcbc_operators import LCBCOperators
from sfepy.discrete.common.mappings import get_physical_qps
from sfepy.discrete.evaluate_variable import eval_real, eval_complex
import six
from six.moves import range
# Integer tags distinguishing variable kinds in this module.
is_state = 0
is_virtual = 1
is_parameter = 2
is_field = 10
def create_adof_conns(conn_info, var_indx=None, active_only=True, verbose=True):
    """
    Create active DOF connectivities for all variables referenced in
    `conn_info`.
    If a variable has not the equation mapping, a trivial mapping is assumed
    and connectivity with all DOFs active is created.
    DOF connectivity key is a tuple ``(primary variable name, region name,
    type, is_trace flag)``.
    Notes
    -----
    If `active_only` is False, the DOF connectivities contain all DOFs, with
    the E(P)BC-constrained ones stored as `-1 - <DOF number>`, so that the full
    connectivities can be reconstructed for the matrix graph creation.
    """
    var_indx = get_default(var_indx, {})
    def _create(var, econn):
        # Offset of the variable's DOFs in the global state vector.
        offset = var_indx.get(var.name, slice(0, 0)).start
        if var.eq_map is None:
            # No equation mapping -> trivial mapping, all DOFs active.
            eq = nm.arange(var.n_dof, dtype=nm.int32)
        else:
            if isinstance(var, DGFieldVariable):
                eq = nm.arange(var.n_dof, dtype=nm.int32)
            else:
                if active_only:
                    eq = var.eq_map.eq
                else:
                    # Keep all DOFs; encode E(P)BC-constrained ones as
                    # -1 - <global DOF number> (see Notes in the docstring).
                    eq = nm.arange(var.n_dof, dtype=nm.int32)
                    eq[var.eq_map.eq_ebc] = -1 - (var.eq_map.eq_ebc + offset)
                    eq[var.eq_map.master] = eq[var.eq_map.slave]
        adc = create_adof_conn(eq, econn, var.n_components, offset)
        return adc
    def _assign(adof_conns, info, region, var, field, is_trace):
        # One connectivity per (variable, region, dc type, trace flag).
        key = (var.name, region.name, info.dc_type.type, is_trace)
        if not key in adof_conns:
            econn = field.get_econn(info.dc_type, region, is_trace=is_trace)
            if econn is None: return
            adof_conns[key] = _create(var, econn)
        if info.is_trace:
            # For trace terms also create the non-trace counterpart.
            key = (var.name, region.name, info.dc_type.type, False)
            if not key in adof_conns:
                econn = field.get_econn(info.dc_type, region, is_trace=False)
                adof_conns[key] = _create(var, econn)
    if verbose:
        output('setting up dof connectivities...')
    timer = Timer(start=True)
    adof_conns = {}
    for key, ii, info in iter_dict_of_lists(conn_info, return_keys=True):
        if info.primary is not None:
            var = info.primary
            field = var.get_field()
            field.setup_extra_data(info.ps_tg, info, info.is_trace)
            region = info.get_region()
            _assign(adof_conns, info, region, var, field, info.is_trace)
        if info.has_virtual and not info.is_trace:
            # A virtual variable uses its primary counterpart, if present.
            var = info.virtual
            field = var.get_field()
            field.setup_extra_data(info.v_tg, info, False)
            aux = var.get_primary()
            var = aux if aux is not None else var
            region = info.get_region(can_trace=False)
            _assign(adof_conns, info, region, var, field, False)
    if verbose:
        output('...done in %.2f s' % timer.stop())
    return adof_conns
def create_adof_conn(eq, conn, dpn, offset):
    """
    Given a node connectivity, number of DOFs per node and equation mapping,
    create the active dof connectivity.
    Locally (in a connectivity row), the DOFs are stored DOF-by-DOF (u_0 in all
    local nodes, u_1 in all local nodes, ...).
    Globally (in a state vector), the DOFs are stored node-by-node (u_0, u_1,
    ..., u_X in node 0, u_0, u_1, ..., u_X in node 1, ...).
    """
    def _offset_active(active):
        # Shift only non-negative (active) entries by the global offset.
        return active + nm.asarray(offset * (active >= 0), dtype=nm.int32)

    if dpn == 1:
        return _offset_active(nm.take(eq, conn))

    n_el, n_ep = conn.shape
    adc = nm.empty((n_el, n_ep * dpn), dtype=conn.dtype)
    for idof in range(dpn):
        block = slice(idof * n_ep, (idof + 1) * n_ep)
        adc[:, block] = _offset_active(nm.take(eq, dpn * conn + idof))
    return adc
def expand_basis(basis, dpn):
    """
    Expand basis for variables with several components (DOFs per node), in a
    way compatible with :func:`create_adof_conn()`, according to `dpn`
    (DOF-per-node count).
    """
    n_c, n_bf = basis.shape[-2:]
    out_shape = basis.shape[:2] + (dpn, n_bf * dpn)
    ebasis = nm.zeros(out_shape, dtype=nm.float64)
    for ir in range(dpn):
        # Column block of the ir-th component in the expanded basis.
        cols = slice(ir * n_bf, (ir + 1) * n_bf)
        for ic in range(n_c):
            ebasis[..., n_c * ir + ic, cols] = basis[..., ic, :]
    return ebasis
class Variables(Container):
"""
Container holding instances of Variable.
"""
    @staticmethod
    def from_conf(conf, fields):
        """
        Construct a Variables container from a configuration dictionary.
        This method resets the variable counters for automatic order!
        """
        Variable.reset()
        obj = Variables()
        for key, val in six.iteritems(conf):
            # Each configuration entry becomes one Variable instance.
            var = Variable.from_conf(key, val, fields)
            obj[var.name] = var
        obj.setup_dtype()
        obj.setup_ordering()
        return obj
def __init__(self, variables=None):
Container.__init__(self, OneTypeList(Variable),
state=set(),
virtual=set(),
parameter=set(),
has_virtual_dcs=False,
has_lcbc=False,
has_lcbc_rhs=False,
has_eq_map=False,
ordered_state=[],
ordered_virtual=[])
if variables is not None:
for var in variables:
self[var.name] = var
self.setup_ordering()
self.setup_dtype()
self.adof_conns = {}
def __setitem__(self, ii, var):
Container.__setitem__(self, ii, var)
if var.is_state():
self.state.add(var.name)
elif var.is_virtual():
self.virtual.add(var.name)
elif var.is_parameter():
self.parameter.add(var.name)
var._variables = self
self.setup_ordering()
self.setup_dof_info()
def setup_dtype(self):
"""
Setup data types of state variables - all have to be of the same
data type, one of nm.float64 or nm.complex128.
"""
dtypes = {nm.complex128 : 0, nm.float64 : 0}
for var in self.iter_state(ordered=False):
dtypes[var.dtype] += 1
if dtypes[nm.float64] and dtypes[nm.complex128]:
raise ValueError("All variables must have the same dtype!")
elif dtypes[nm.float64]:
self.dtype = nm.float64
elif dtypes[nm.complex128]:
self.dtype = nm.complex128
else:
self.dtype = None
def link_duals(self):
"""
Link state variables with corresponding virtual variables,
and assign link to self to each variable instance.
Usually, when solving a PDE in the weak form, each state
variable has a corresponding virtual variable.
"""
for ii in self.state:
self[ii].dual_var_name = None
for ii in self.virtual:
vvar = self[ii]
try:
self[vvar.primary_var_name].dual_var_name = vvar.name
except IndexError:
pass
def get_dual_names(self):
"""
Get names of pairs of dual variables.
Returns
-------
duals : dict
The dual names as virtual name : state name pairs.
"""
duals = {}
for name in self.virtual:
duals[name] = self[name].primary_var_name
return duals
def setup_ordering(self):
"""
Setup ordering of variables.
"""
self.link_duals()
orders = []
for var in self:
try:
orders.append(var._order)
except:
pass
orders.sort()
self.ordered_state = [None] * len(self.state)
for var in self.iter_state(ordered=False):
ii = orders.index(var._order)
self.ordered_state[ii] = var.name
self.ordered_virtual = [None] * len(self.virtual)
ii = 0
for var in self.iter_state(ordered=False):
if var.dual_var_name is not None:
self.ordered_virtual[ii] = var.dual_var_name
ii += 1
def has_virtuals(self):
return len(self.virtual) > 0
def setup_dof_info(self, make_virtual=False):
"""
Setup global DOF information.
"""
self.di = DofInfo('state_dof_info')
for var_name in self.ordered_state:
self.di.append_variable(self[var_name])
if make_virtual:
self.vdi = DofInfo('virtual_dof_info')
for var_name in self.ordered_virtual:
self.vdi.append_variable(self[var_name])
else:
self.vdi = self.di
def setup_lcbc_operators(self, lcbcs, ts=None, functions=None):
"""
Prepare linear combination BC operator matrix and right-hand side
vector.
"""
from sfepy.discrete.common.region import are_disjoint
if lcbcs is None:
self.lcdi = self.adi
return
self.lcbcs = lcbcs
if (ts is None) or ((ts is not None) and (ts.step == 0)):
regs = []
var_names = []
for bcs in self.lcbcs:
for bc in bcs.iter_single():
vns = bc.get_var_names()
regs.append(bc.regions[0])
var_names.append(vns[0])
if bc.regions[1] is not None:
regs.append(bc.regions[1])
var_names.append(vns[1])
for i0 in range(len(regs) - 1):
for i1 in range(i0 + 1, len(regs)):
if ((var_names[i0] == var_names[i1])
and not are_disjoint(regs[i0], regs[i1])):
raise ValueError('regions %s and %s are not disjoint!'
% (regs[i0].name, regs[i1].name))
ops = LCBCOperators('lcbcs', self, functions=functions)
for bcs in self.lcbcs:
for bc in bcs.iter_single():
vns = bc.get_var_names()
dofs = [self[vn].dofs for vn in vns if vn is not None]
bc.canonize_dof_names(*dofs)
if not is_active_bc(bc, ts=ts, functions=functions):
continue
output('lcbc:', bc.name)
ops.add_from_bc(bc, ts)
aux = ops.make_global_operator(self.adi)
self.mtx_lcbc, self.vec_lcbc, self.lcdi = aux
self.has_lcbc = self.mtx_lcbc is not None
self.has_lcbc_rhs = self.vec_lcbc is not None
def get_lcbc_operator(self):
if self.has_lcbc:
return self.mtx_lcbc
else:
raise ValueError('no LCBC defined!')
def equation_mapping(self, ebcs, epbcs, ts, functions, problem=None,
active_only=True):
"""
Create the mapping of active DOFs from/to all DOFs for all state
variables.
Parameters
----------
ebcs : Conditions instance
The essential (Dirichlet) boundary conditions.
epbcs : Conditions instance
The periodic boundary conditions.
ts : TimeStepper instance
The time stepper.
functions : Functions instance
The user functions for boundary conditions.
problem : Problem instance, optional
The problem that can be passed to user functions as a context.
active_only : bool
If True, the active DOF info ``self.adi`` uses the reduced (active
DOFs only) numbering. Otherwise it is the same as ``self.di``.
Returns
-------
active_bcs : set
The set of boundary conditions active in the current time.
"""
self.ebcs = ebcs
self.epbcs = epbcs
##
# Assing EBC, PBC to variables and regions.
if ebcs is not None:
self.bc_of_vars = self.ebcs.group_by_variables()
else:
self.bc_of_vars = {}
if epbcs is not None:
self.bc_of_vars = self.epbcs.group_by_variables(self.bc_of_vars)
##
# List EBC nodes/dofs for each variable.
active_bcs = set()
for var_name in self.di.var_names:
var = self[var_name]
bcs = self.bc_of_vars.get(var.name, None)
var_di = self.di.get_info(var_name)
active = var.equation_mapping(bcs, var_di, ts, functions,
problem=problem)
active_bcs.update(active)
if self.has_virtual_dcs:
vvar = self[var.dual_var_name]
vvar_di = self.vdi.get_info(var_name)
active = vvar.equation_mapping(bcs, vvar_di, ts, functions,
problem=problem)
active_bcs.update(active)
self.adi = DofInfo('active_state_dof_info')
for var_name in self.ordered_state:
self.adi.append_variable(self[var_name], active=active_only)
if self.has_virtual_dcs:
self.avdi = DofInfo('active_virtual_dof_info')
for var_name in self.ordered_virtual:
self.avdi.append_variable(self[var_name], active=active_only)
else:
self.avdi = self.adi
self.has_eq_map = True
return active_bcs
def get_matrix_shape(self):
if not self.has_eq_map:
raise ValueError('call equation_mapping() first!')
return (self.avdi.ptr[-1], self.adi.ptr[-1])
def setup_initial_conditions(self, ics, functions):
self.ics = ics
self.ic_of_vars = self.ics.group_by_variables()
for var_name in self.di.var_names:
var = self[var_name]
ics = self.ic_of_vars.get(var.name, None)
if ics is None: continue
var.setup_initial_conditions(ics, self.di, functions)
for var_name in self.parameter:
var = self[var_name]
if hasattr(var, 'special') and ('ic' in var.special):
setter, sargs, skwargs = var._get_setter('ic', functions)
var.set_data(setter(*sargs, **skwargs))
output('IC data of %s set by %s()' % (var.name, setter.name))
def set_adof_conns(self, adof_conns):
"""
Set all active DOF connectivities to `self` as well as relevant
sub-dicts to the individual variables.
"""
self.adof_conns = adof_conns
for var in self:
var.adof_conns = {}
for key, val in six.iteritems(adof_conns):
if key[0] in self.names:
var = self[key[0]]
var.adof_conns[key] = val
var = var.get_dual()
if var is not None:
var.adof_conns[key] = val
def create_state_vector(self):
vec = nm.zeros((self.di.ptr[-1],), dtype=self.dtype)
return vec
def create_stripped_state_vector(self):
vec = nm.zeros((self.adi.ptr[-1],), dtype=self.dtype)
return vec
def apply_ebc(self, vec, force_values=None):
"""
Apply essential (Dirichlet) and periodic boundary conditions
defined for the state variables to vector `vec`.
"""
for var in self.iter_state():
var.apply_ebc(vec, self.di.indx[var.name].start, force_values)
def apply_ic(self, vec, force_values=None):
"""
Apply initial conditions defined for the state variables to
vector `vec`.
"""
for var in self.iter_state():
var.apply_ic(vec, self.di.indx[var.name].start, force_values)
def strip_state_vector(self, vec, follow_epbc=False, svec=None):
"""
Get the reduced DOF vector, with EBC and PBC DOFs removed.
Notes
-----
If 'follow_epbc' is True, values of EPBC master dofs are not simply
thrown away, but added to the corresponding slave dofs, just like when
assembling. For vectors with state (unknown) variables it should be set
to False, for assembled vectors it should be set to True.
"""
if svec is None:
svec = nm.empty((self.adi.ptr[-1],), dtype=self.dtype)
for var in self.iter_state():
aindx = self.adi.indx[var.name]
svec[aindx] = var.get_reduced(vec, self.di.indx[var.name].start,
follow_epbc)
return svec
def make_full_vec(self, svec, force_value=None, vec=None):
"""
Make a full DOF vector satisfying E(P)BCs from a reduced DOF
vector.
Parameters
----------
svec : array
The reduced DOF vector.
force_value : float, optional
Passing a `force_value` overrides the EBC values.
vec : array, optional
If given, the buffer for storing the result (zeroed).
Returns
-------
vec : array
The full DOF vector.
"""
self.check_vector_size(svec, stripped=True)
if self.has_lcbc:
if self.has_lcbc_rhs:
svec = self.mtx_lcbc * svec + self.vec_lcbc
else:
svec = self.mtx_lcbc * svec
if vec is None:
vec = self.create_state_vector()
for var in self.iter_state():
indx = self.di.indx[var.name]
aindx = self.adi.indx[var.name]
var.get_full(svec, aindx.start, force_value, vec, indx.start)
return vec
def has_ebc(self, vec, force_values=None):
for var_name in self.di.var_names:
eq_map = self[var_name].eq_map
i0 = self.di.indx[var_name].start
ii = i0 + eq_map.eq_ebc
if force_values is None:
if not nm.allclose(vec[ii], eq_map.val_ebc):
return False
else:
if isinstance(force_values, dict):
if not nm.allclose(vec[ii], force_values[var_name]):
return False
else:
if not nm.allclose(vec[ii], force_values):
return False
# EPBC.
if not nm.allclose(vec[i0+eq_map.master], vec[i0+eq_map.slave]):
return False
return True
def get_indx(self, var_name, stripped=False, allow_dual=False):
var = self[var_name]
if not var.is_state():
if allow_dual and var.is_virtual():
var_name = var.primary_var_name
else:
msg = '%s is not a state part' % var_name
raise IndexError(msg)
if stripped:
return self.adi.indx[var_name]
else:
return self.di.indx[var_name]
def check_vector_size(self, vec, stripped=False):
"""
Check whether the shape of the DOF vector corresponds to the
total number of DOFs of the state variables.
Parameters
----------
vec : array
The vector of DOF values.
stripped : bool
If True, the size of the DOF vector should be reduced,
i.e. without DOFs fixed by boundary conditions.
"""
if not stripped:
n_dof = self.di.get_n_dof_total()
if vec.size != n_dof:
msg = 'incompatible data size!' \
' (%d (variables) == %d (DOF vector))' \
% (n_dof, vec.size)
raise ValueError(msg)
else:
if self.has_lcbc:
n_dof = self.lcdi.get_n_dof_total()
else:
n_dof = self.adi.get_n_dof_total()
if vec.size != n_dof:
msg = 'incompatible data size!' \
' (%d (active variables) == %d (reduced DOF vector))' \
% (n_dof, vec.size)
raise ValueError(msg)
def get_state_part_view(self, state, var_name, stripped=False):
self.check_vector_size(state, stripped=stripped)
return state[self.get_indx(var_name, stripped)]
def set_state_part(self, state, part, var_name, stripped=False):
self.check_vector_size(state, stripped=stripped)
state[self.get_indx(var_name, stripped)] = part
def get_state_parts(self, vec=None):
"""
Return parts of a state vector corresponding to individual state
variables.
Parameters
----------
vec : array, optional
The state vector. If not given, then the data stored in the
variables are returned instead.
Returns
-------
out : dict
The dictionary of the state parts.
"""
if vec is not None:
self.check_vector_size(vec)
out = {}
for var in self.iter_state():
if vec is None:
out[var.name] = var()
else:
out[var.name] = vec[self.di.indx[var.name]]
return out
def set_data(self, data, step=0, ignore_unknown=False,
preserve_caches=False):
"""
Set data (vectors of DOF values) of variables.
Parameters
----------
data : array
The state vector or dictionary of {variable_name : data vector}.
step : int, optional
The time history step, 0 (default) = current.
ignore_unknown : bool, optional
Ignore unknown variable names if `data` is a dict.
preserve_caches : bool
If True, do not invalidate evaluate caches of variables.
"""
if data is None: return
if isinstance(data, dict):
for key, val in six.iteritems(data):
try:
var = self[key]
except (ValueError, IndexError):
if ignore_unknown:
pass
else:
raise KeyError('unknown variable! (%s)' % key)
else:
var.set_data(val, step=step,
preserve_caches=preserve_caches)
elif isinstance(data, nm.ndarray):
self.check_vector_size(data)
for ii in self.state:
var = self[ii]
var.set_data(data, self.di.indx[var.name], step=step,
preserve_caches=preserve_caches)
else:
raise ValueError('unknown data class! (%s)' % data.__class__)
def set_from_state(self, var_names, state, var_names_state):
"""
Set variables with names in `var_names` from state variables with names
in `var_names_state` using DOF values in the state vector `state`.
"""
self.check_vector_size(state)
if isinstance(var_names, basestr):
var_names = [var_names]
var_names_state = [var_names_state]
for ii, var_name in enumerate(var_names):
var_name_state = var_names_state[ii]
if self[var_name_state].is_state():
self[var_name].set_data(state, self.di.indx[var_name_state])
else:
msg = '%s is not a state part' % var_name_state
raise IndexError(msg)
def state_to_output(self, vec, fill_value=None, var_info=None,
extend=True, linearization=None):
"""
Convert a state vector to a dictionary of output data usable by
Mesh.write().
"""
di = self.di
if var_info is None:
self.check_vector_size(vec)
var_info = {}
for name in di.var_names:
var_info[name] = (False, name)
out = {}
for key, indx in six.iteritems(di.indx):
var = self[key]
if key not in list(var_info.keys()): continue
is_part, name = var_info[key]
if is_part:
aux = vec
else:
aux = vec[indx]
out.update(var.create_output(aux, key=name, extend=extend,
fill_value=fill_value,
linearization=linearization))
return out
def iter_state(self, ordered=True):
if ordered:
for ii in self.ordered_state:
yield self[ii]
else:
for ii in self.state:
yield self[ii]
def init_history(self):
for var in self.iter_state():
var.init_history()
def time_update(self, ts, functions, verbose=True):
if verbose:
output('updating variables...')
for var in self:
var.time_update(ts, functions)
if verbose:
output('...done')
def advance(self, ts):
for var in self.iter_state():
var.advance(ts)
class Variable(Struct):
    """
    Base class of all variables.

    Handles the variable kind ('unknown', 'test', 'parameter'), the
    automatic ordering of unknowns, and the DOF data storage with an
    optional time history.
    """
    # Counter used to assign automatic orders to unknowns.
    _count = 0
    # Orders already taken by existing unknowns.
    _orders = []
    # Names of all created variables.
    _all_var_names = set()
    @staticmethod
    def reset():
        """
        Reset the counters used for automatic ordering of variables.
        """
        Variable._count = 0
        Variable._orders = []
        Variable._all_var_names = set()
    @staticmethod
    def from_conf(key, conf, fields):
        """
        Create a variable according to configuration item `conf`, using
        `fields` for field variables.
        """
        # conf.kind is e.g. 'unknown field' or 'test surface_field'.
        aux = conf.kind.split()
        if len(aux) == 2:
            kind, family = aux
        elif len(aux) == 3:
            kind, family = aux[0], '_'.join(aux[1:])
        else:
            raise ValueError('variable kind is 2 or 3 words! (%s)' % conf.kind)
        history = conf.get('history', None)
        if history is not None:
            try:
                history = int(history)
                assert_(history >= 0)
            except (ValueError, TypeError):
                raise ValueError('history must be integer >= 0! (got "%s")'
                                 % history)
        order = conf.get('order', None)
        if order is not None:
            order = int(order)
        primary_var_name = conf.get('dual', None)
        if primary_var_name is None:
            if hasattr(conf, 'like'):
                # 'like' can name a variable or be unset ('(set-to-None)').
                primary_var_name = get_default(conf.like, '(set-to-None)')
            else:
                primary_var_name = None
        special = conf.get('special', None)
        if family == 'field':
            try:
                fld = fields[conf.field]
            except IndexError:
                msg = 'field "%s" does not exist!' % conf.field
                raise KeyError(msg)
            if "DG" in fld.family_name:
                obj = DGFieldVariable(conf.name, kind, fld, order, primary_var_name,
                                      special=special, key=key, history=history)
            else:
                obj = FieldVariable(conf.name, kind, fld, order, primary_var_name,
                                    special=special, key=key, history=history)
        else:
            raise ValueError('unknown variable family! (%s)' % family)
        return obj
    def __init__(self, name, kind, order=None, primary_var_name=None,
                 special=None, flags=None, **kwargs):
        Struct.__init__(self, name=name, **kwargs)
        self.flags = set()
        if flags is not None:
            for flag in flags:
                self.flags.add(flag)
        self.indx = slice(None)
        self.n_dof = None
        self.step = 0
        self.dt = 1.0
        self.initial_condition = None
        self.dual_var_name = None
        self.eq_map = None
        if self.is_virtual():
            # Virtual (test) variables carry no DOF data.
            self.data = None
        else:
            # DOF data deque: index 0 is the current step, older steps follow.
            self.data = deque()
            self.data.append(None)
        self._set_kind(kind, order, primary_var_name, special=special)
        Variable._all_var_names.add(name)
    def _set_kind(self, kind, order, primary_var_name, special=None):
        """
        Set the variable kind and the related attributes: flags, order
        (for unknowns), `primary_var_name` and `dof_name`.
        """
        if kind == 'unknown':
            self.flags.add(is_state)
            if order is not None:
                # User-given order must be unique.
                if order in Variable._orders:
                    raise ValueError('order %d already used!' % order)
                else:
                    self._order = order
                    Variable._orders.append(order)
            else:
                # Automatic ordering by creation count.
                self._order = Variable._count
                Variable._orders.append(self._order)
                Variable._count += 1
            self.dof_name = self.name
        elif kind == 'test':
            if primary_var_name == self.name:
                raise ValueError('primary variable for %s cannot be %s!'
                                 % (self.name, primary_var_name))
            self.flags.add(is_virtual)
            msg = 'test variable %s: related unknown missing' % self.name
            self.primary_var_name = get_default(primary_var_name, None, msg)
            self.dof_name = self.primary_var_name
        elif kind == 'parameter':
            self.flags.add(is_parameter)
            msg = 'parameter variable %s: related unknown missing' % self.name
            self.primary_var_name = get_default(primary_var_name, None, msg)
            if self.primary_var_name == '(set-to-None)':
                self.primary_var_name = None
                self.dof_name = self.name
            else:
                self.dof_name = self.primary_var_name
            if special is not None:
                self.special = special
        else:
            raise NotImplementedError('unknown variable kind: %s' % kind)
        self.kind = kind
    def _setup_dofs(self, n_nod, n_components, val_shape):
        """
        Setup number of DOFs and DOF names.
        """
        self.n_nod = n_nod
        self.n_components = n_components
        self.val_shape = val_shape
        self.n_dof = self.n_nod * self.n_components
        # DOF names are '<dof_name>.0', '<dof_name>.1', ...
        self.dofs = [self.dof_name + ('.%d' % ii)
                     for ii in range(self.n_components)]
    def get_primary(self):
        """
        Get the corresponding primary variable.
        Returns
        -------
        var : Variable instance
            The primary variable, or `self` for state
            variables or if `primary_var_name` is None, or None if no other
            variables are defined.
        """
        if self.is_state():
            var = self
        elif self.primary_var_name is not None:
            if ((self._variables is not None)
                and (self.primary_var_name in self._variables.names)):
                var = self._variables[self.primary_var_name]
            else:
                var = None
        else:
            var = self
        return var
    def get_dual(self):
        """
        Get the dual variable.
        Returns
        -------
        var : Variable instance
            The primary variable for non-state variables, or the dual
            variable for state variables.
        """
        if self.is_state():
            if ((self._variables is not None)
                and (self.dual_var_name in self._variables.names)):
                var = self._variables[self.dual_var_name]
            else:
                var = None
        else:
            if ((self._variables is not None)
                and (self.primary_var_name in self._variables.names)):
                var = self._variables[self.primary_var_name]
            else:
                var = None
        return var
    def is_state(self):
        # True for 'unknown' kind variables.
        return is_state in self.flags
    def is_virtual(self):
        # True for 'test' kind variables.
        return is_virtual in self.flags
    def is_parameter(self):
        # True for 'parameter' kind variables.
        return is_parameter in self.flags
    def is_state_or_parameter(self):
        # True for variables that carry DOF data.
        return (is_state in self.flags) or (is_parameter in self.flags)
    def is_kind(self, kind):
        # NOTE(review): dispatches via eval() to self.is_<kind>();
        # getattr(self, 'is_%s' % kind)() would avoid eval - confirm `kind`
        # always comes from trusted code before changing.
        return eval('self.is_%s()' % kind)
    def is_real(self):
        return self.dtype in real_types
    def is_complex(self):
        return self.dtype in complex_types
    def is_finite(self, step=0, derivative=None, dt=None):
        """
        Return True if all DOF values of the given step (or time
        derivative) are finite.
        """
        return nm.isfinite(self(step=step, derivative=derivative, dt=dt)).all()
    def get_primary_name(self):
        """
        Return the name of the primary variable (`self.name` for state
        variables).
        """
        if self.is_state():
            name = self.name
        else:
            name = self.primary_var_name
        return name
    def init_history(self):
        """Initialize data of variables with history."""
        if self.history is None: return
        # One slot for the current step plus `history` past steps.
        self.data = deque((self.history + 1) * [None])
        self.step = 0
    def time_update(self, ts, functions):
        """Implemented in subclasses."""
        pass
    def advance(self, ts):
        """
        Advance in time the DOF state history. A copy of the DOF vector
        is made to prevent history modification.
        """
        if self.history is None: return
        self.step = ts.step + 1
        if self.history > 0:
            # Copy the current step data to the history data, shift history,
            # initialize if needed. The current step data are left intact.
            # Note: cannot use self.data.rotate() due to data sharing with
            # State.
            for ii in range(self.history, 0, -1):
                if self.data[ii] is None:
                    self.data[ii] = nm.empty_like(self.data[0])
                self.data[ii][:] = self.data[ii - 1]
            # Advance evaluate cache.
            for step_cache in six.itervalues(self.evaluate_cache):
                steps = sorted(step_cache.keys())
                for step in steps:
                    if step is None:
                        # Special caches with possible custom advance()
                        # function.
                        for key, val in six.iteritems(step_cache[step]):
                            if hasattr(val, '__advance__'):
                                val.__advance__(ts, val)
                    elif -step < self.history:
                        step_cache[step-1] = step_cache[step]
                if len(steps) and (steps[0] is not None):
                    step_cache.pop(steps[-1])
    def init_data(self, step=0):
        """
        Initialize the dof vector data of time step `step` to zeros.
        """
        if self.is_state_or_parameter():
            data = nm.zeros((self.n_dof,), dtype=self.dtype)
            self.set_data(data, step=step)
    def set_constant(self, val):
        """
        Set the variable to a constant value.
        """
        data = nm.empty((self.n_dof,), dtype=self.dtype)
        data.fill(val)
        self.set_data(data)
    def set_data(self, data=None, indx=None, step=0,
                 preserve_caches=False):
        """
        Set data (vector of DOF values) of the variable.
        Parameters
        ----------
        data : array
            The vector of DOF values.
        indx : int, optional
            If given, `data[indx]` is used.
        step : int, optional
            The time history step, 0 (default) = current.
        preserve_caches : bool
            If True, do not invalidate evaluate caches of the variable.
        """
        data = data.ravel()
        if indx is None:
            indx = slice(0, len(data))
        else:
            indx = slice(int(indx.start), int(indx.stop))
        n_data_dof = indx.stop - indx.start
        if self.n_dof != n_data_dof:
            msg = 'incompatible data shape! (%d (variable) == %d (data))' \
                  % (self.n_dof, n_data_dof)
            raise ValueError(msg)
        elif (step > 0) or (-step >= len(self.data)):
            # Valid steps are non-positive and within the stored history.
            raise ValueError('step %d out of range! ([%d, 0])'
                             % (step, -(len(self.data) - 1)))
        else:
            self.data[step] = data
            self.indx = indx
        if not preserve_caches:
            self.invalidate_evaluate_cache(step=step)
    def __call__(self, step=0, derivative=None, dt=None):
        """
        Return vector of degrees of freedom of the variable.
        Parameters
        ----------
        step : int, default 0
            The time step (0 means current, -1 previous, ...).
        derivative : None or 'dt'
            If not None, return time derivative of the DOF vector,
            approximated by the backward finite difference.
        Returns
        -------
        vec : array
            The DOF vector. If `derivative` is None: a view of the data vector,
            otherwise: required derivative of the DOF vector
            at time step given by `step`.
        Notes
        -----
        If the previous time step is requested in step 0, the step 0
        DOF vector is returned instead.
        """
        if derivative is None:
            if (self.step == 0) and (step == -1):
                data = self.data[0]
            else:
                # Non-positive `step` indexes the history deque.
                data = self.data[-step]
            if data is None:
                raise ValueError('data of variable are not set! (%s, step %d)' \
                                 % (self.name, step))
            return data[self.indx]
        else:
            if self.history is None:
                msg = 'set history type of variable %s to use derivatives!'\
                      % self.name
                raise ValueError(msg)
            dt = get_default(dt, self.dt)
            # Backward finite difference approximation.
            return (self(step=step) - self(step=step-1)) / dt
    def get_initial_condition(self):
        """
        Return the initial condition DOF vector, or 0.0 when no initial
        condition was set.
        """
        if self.initial_condition is None:
            return 0.0
        else:
            return self.initial_condition
class FieldVariable(Variable):
"""
A finite element field variable.
field .. field description of variable (borrowed)
"""
def __init__(self, name, kind, field, order=None, primary_var_name=None,
special=None, flags=None, history=None, **kwargs):
Variable.__init__(self, name, kind, order, primary_var_name,
special, flags, history=history, **kwargs)
self._set_field(field)
self.has_field = True
self.has_bc = True
self._variables = None
self.clear_evaluate_cache()
def _set_field(self, field):
"""
Set field of the variable.
Takes reference to a Field instance. Sets dtype according to
field.dtype. Sets `dim` attribute to spatial dimension.
"""
self.is_surface = field.is_surface
self.field = field
self._setup_dofs(field.n_nod, field.n_components, field.val_shape)
self.flags.add(is_field)
self.dtype = field.dtype
self.dim = field.domain.shape.dim
def _get_setter(self, kind, functions, **kwargs):
"""
Get the setter function of the variable and its arguments depending in
the setter kind.
"""
if not (hasattr(self, 'special') and (kind in self.special)):
return
setter_name = self.special[kind]
setter = functions[setter_name]
region = self.field.region
nod_list = self.field.get_dofs_in_region(region)
nods = nm.unique(nod_list)
coors = self.field.get_coor(nods)
if kind == 'setter':
sargs = (kwargs.get('ts'), coors)
elif kind == 'ic':
sargs = (coors, )
skwargs = {'region' : region}
return setter, sargs, skwargs
def get_field(self):
return self.field
def get_mapping(self, region, integral, integration,
get_saved=False, return_key=False):
"""
Get the reference element mapping of the underlying field.
See Also
--------
sfepy.discrete.common.fields.Field.get_mapping
"""
if region is None:
region = self.field.region
out = self.field.get_mapping(region, integral, integration,
get_saved=get_saved,
return_key=return_key)
return out
def get_dof_conn(self, dc_type, is_trace=False, trace_region=None):
"""
Get active dof connectivity of a variable.
Notes
-----
The primary and dual variables must have the same Region.
"""
if self.is_virtual():
var = self.get_primary()
# No primary variable can occur in single term evaluations.
var_name = var.name if var is not None else self.name
else:
var_name = self.name
if not is_trace:
region_name = dc_type.region_name
else:
aux = self.field.domain.regions[dc_type.region_name]
region = aux.get_mirror_region(trace_region)
region_name = region.name
key = (var_name, region_name, dc_type.type, is_trace)
dc = self.adof_conns[key]
return dc
def get_dof_info(self, active=False):
details = Struct(name='field_var_dof_details',
n_nod=self.n_nod,
dpn=self.n_components)
if active:
n_dof = self.n_adof
else:
n_dof = self.n_dof
return n_dof, details
def time_update(self, ts, functions):
"""
Store time step, set variable data for variables with the setter
function.
"""
if ts is not None:
self.dt = ts.dt
if hasattr(self, 'special') and ('setter' in self.special):
setter, sargs, skwargs = self._get_setter('setter', functions,
ts=ts)
self.set_data(setter(*sargs, **skwargs))
output('data of %s set by %s()' % (self.name, setter.name))
def set_from_qp(self, data_qp, integral, step=0):
"""
Set DOFs of variable using values in quadrature points
corresponding to the given integral.
"""
data_vertex = self.field.average_qp_to_vertices(data_qp, integral)
# Field nodes values.
data = self.field.interp_v_vals_to_n_vals(data_vertex)
data = data.ravel()
self.indx = slice(0, len(data))
self.data[step] = data
def set_from_mesh_vertices(self, data):
"""
Set the variable using values at the mesh vertices.
"""
ndata = self.field.interp_v_vals_to_n_vals(data)
self.set_data(ndata)
def set_from_function(self, fun, step=0):
"""
Set the variable data (the vector of DOF values) using a function of
space coordinates.
Parameters
----------
fun : callable
The function of coordinates returning DOF values of shape
`(n_coor, n_components)`.
step : int, optional
The time history step, 0 (default) = current.
"""
_, vv = self.field.set_dofs(fun, self.field.region, self.n_components)
self.set_data(vv.ravel(), step=step)
def equation_mapping(self, bcs, var_di, ts, functions, problem=None,
warn=False):
"""
Create the mapping of active DOFs from/to all DOFs.
Sets n_adof.
Returns
-------
active_bcs : set
The set of boundary conditions active in the current time.
"""
self.eq_map = EquationMap('eq_map', self.dofs, var_di)
if bcs is not None:
bcs.canonize_dof_names(self.dofs)
bcs.sort()
active_bcs = self.eq_map.map_equations(bcs, self.field, ts, functions,
problem=problem, warn=warn)
self.n_adof = self.eq_map.n_eq
return active_bcs
def setup_initial_conditions(self, ics, di, functions, warn=False):
"""
Setup of initial conditions.
"""
ics.canonize_dof_names(self.dofs)
ics.sort()
self.initial_condition = nm.zeros((di.n_dof[self.name],),
dtype=self.dtype)
for ic in ics:
region = ic.region
dofs, val = ic.dofs
if warn:
clean_msg = ('warning: ignoring nonexistent' \
' IC node (%s) in ' % self.name)
else:
clean_msg = None
nod_list = self.field.get_dofs_in_region(region)
if len(nod_list) == 0:
continue
fun = get_condition_value(val, functions, 'IC', ic.name)
if isinstance(fun, Function):
aux = fun
fun = lambda coors: aux(coors, ic=ic)
nods, vv = self.field.set_dofs(fun, region, len(dofs), clean_msg)
eq = expand_nodes_to_equations(nods, dofs, self.dofs)
self.initial_condition[eq] = nm.ravel(vv)
def get_data_shape(self, integral, integration='volume', region_name=None):
"""
Get element data dimensions for given approximation.
Parameters
----------
integral : Integral instance
The integral describing used numerical quadrature.
integration : 'volume', 'surface', 'surface_extra', 'point' or 'custom'
The term integration type.
region_name : str
The name of the region of the integral.
Returns
-------
data_shape : 5 ints
The `(n_el, n_qp, dim, n_en, n_comp)` for volume shape kind,
`(n_fa, n_qp, dim, n_fn, n_comp)` for surface shape kind and
`(n_nod, 0, 0, 1, n_comp)` for point shape kind.
Notes
-----
- `n_el`, `n_fa` = number of elements/facets
- `n_qp` = number of quadrature points per element/facet
- `dim` = spatial dimension
- `n_en`, `n_fn` = number of element/facet nodes
- `n_comp` = number of variable components in a point/node
- `n_nod` = number of element nodes
"""
aux = self.field.get_data_shape(integral, integration=integration,
region_name=region_name)
data_shape = aux + (self.n_components,)
return data_shape
def clear_evaluate_cache(self):
"""
Clear current evaluate cache.
"""
self.evaluate_cache = {}
def invalidate_evaluate_cache(self, step=0):
"""
Invalidate variable data in evaluate cache for time step given
by `step` (0 is current, -1 previous, ...).
This should be done, for example, prior to every nonlinear
solver iteration.
"""
for step_cache in six.itervalues(self.evaluate_cache):
for key in list(step_cache.keys()):
if key == step: # Given time step to clear.
step_cache.pop(key)
def evaluate(self, mode='val',
region=None, integral=None, integration=None,
step=0, time_derivative=None, is_trace=False,
trace_region=None, dt=None, bf=None):
"""
Evaluate various quantities related to the variable according to
`mode` in quadrature points defined by `integral`.
The evaluated data are cached in the variable instance in
`evaluate_cache` attribute.
Parameters
----------
mode : one of 'val', 'grad', 'div', 'cauchy_strain'
The evaluation mode.
region : Region instance, optional
The region where the evaluation occurs. If None, the
underlying field region is used.
integral : Integral instance, optional
The integral defining quadrature points in which the
evaluation occurs. If None, the first order volume integral
is created. Must not be None for surface integrations.
integration : 'volume', 'surface', 'surface_extra', or 'point'
The term integration type. If None, it is derived from
`integral`.
step : int, default 0
The time step (0 means current, -1 previous, ...).
time_derivative : None or 'dt'
If not None, return time derivative of the data,
approximated by the backward finite difference.
is_trace : bool, default False
Indicate evaluation of trace of the variable on a boundary
region.
dt : float, optional
The time step to be used if `derivative` is `'dt'`. If None,
the `dt` attribute of the variable is used.
bf : Base function, optional
The base function to be used in 'val' mode.
Returns
-------
out : array
The 4-dimensional array of shape
`(n_el, n_qp, n_row, n_col)` with the requested data,
where `n_row`, `n_col` depend on `mode`.
"""
if integration == 'custom':
msg = 'cannot use FieldVariable.evaluate() with custom integration!'
raise ValueError(msg)
step_cache = self.evaluate_cache.setdefault(mode, {})
cache = step_cache.setdefault(step, {})
field = self.field
if region is None:
region = field.region
if is_trace:
region = region.get_mirror_region(trace_region)
if (region is not field.region) and not region.is_empty:
assert_(field.region.contains(region))
if integral is None:
integral = Integral('aux_1', 1)
if integration is None:
integration = 'volume' if region.can_cells else 'surface'
geo, _, key = field.get_mapping(region, integral, integration,
return_key=True)
key += (time_derivative, is_trace)
if key in cache:
out = cache[key]
else:
vec = self(step=step, derivative=time_derivative, dt=dt)
ct = integration
if integration == 'surface_extra':
ct = 'volume'
conn = field.get_econn(ct, region, is_trace, integration)
shape = self.get_data_shape(integral, integration, region.name)
if self.dtype == nm.float64:
out = eval_real(vec, conn, geo, mode, shape, bf)
else:
out = eval_complex(vec, conn, geo, mode, shape, bf)
cache[key] = out
return out
def get_state_in_region(self, region, reshape=True, step=0):
    """
    Get DOFs of the variable in the given region.

    Parameters
    ----------
    region : Region
        The selected region.
    reshape : bool
        If True, reshape the DOF vector to a 2D array with the individual
        components as columns. Otherwise a 1D DOF array of the form [all
        DOFs in region node 0, all DOFs in region node 1, ...] is returned.
    step : int, default 0
        The time step (0 means current, -1 previous, ...).

    Returns
    -------
    out : array
        The selected DOFs.
    """
    dpn = self.n_components
    nods = self.field.get_dofs_in_region(region, merge=True)
    # Interleave the equation numbers: component `ic` of region node `k`
    # maps to dpn * nods[k] + ic, shifted by the variable offset.
    eq = nm.empty(len(nods) * dpn, dtype=nm.int32)
    for ic in range(dpn):
        eq[ic::dpn] = dpn * nods + ic + self.indx.start
    out = self.data[step][eq]
    if reshape:
        out.shape = (len(nods), dpn)
    return out
def apply_ebc(self, vec, offset=0, force_values=None):
    """
    Apply essential (Dirichlet) and periodic boundary conditions to
    vector `vec`, starting at `offset`.
    """
    eq_map = self.eq_map
    # Essential (Dirichlet) BCs: overwrite the constrained entries with
    # either the stored values or the caller-supplied overrides.
    if force_values is None:
        values = eq_map.val_ebc
    elif isinstance(force_values, dict):
        values = force_values[self.name]
    else:
        values = force_values
    vec[offset + eq_map.eq_ebc] = values
    # Periodic BCs: master DOFs mirror their slave DOFs.
    vec[offset + eq_map.master] = vec[offset + eq_map.slave]
def apply_ic(self, vec, offset=0, force_values=None):
    """
    Apply initial conditions to vector `vec`, starting at `offset`.
    """
    ii = slice(offset, offset + self.n_dof)
    # Fill the variable's slice with the stored initial condition, or
    # with caller-supplied override values.
    if force_values is None:
        values = self.get_initial_condition()
    elif isinstance(force_values, dict):
        values = force_values[self.name]
    else:
        values = force_values
    vec[ii] = values
def get_reduced(self, vec, offset=0, follow_epbc=False):
    """
    Get the reduced DOF vector, with EBC and PBC DOFs removed.

    Notes
    -----
    The full vector starts in `vec` at `offset`. If `follow_epbc` is True,
    values of EPBC master DOFs are not simply thrown away, but added to the
    corresponding slave DOFs, just like when assembling. For vectors with
    state (unknown) variables it should be set to False, for assembled
    vectors it should be set to True.
    """
    eq_map = self.eq_map
    r_vec = vec[offset + eq_map.eqi]
    if follow_epbc:
        # Accumulate the EPBC master values onto their slave DOFs,
        # skipping slaves that are themselves constrained (eq < 0).
        masters = offset + eq_map.master
        slaves = eq_map.eq[eq_map.slave]
        active = slaves >= 0
        la.assemble1d(r_vec, slaves[active], vec[masters[active]])
    return r_vec
def get_full(self, r_vec, r_offset=0, force_value=None,
             vec=None, offset=0):
    """
    Get the full DOF vector satisfying E(P)BCs from a reduced DOF
    vector.

    Notes
    -----
    The reduced vector starts in `r_vec` at `r_offset`.
    Passing a `force_value` overrides the EBC values. Optionally,
    `vec` argument can be provided to store the full vector (in
    place) starting at `offset`.
    """
    if vec is None:
        vec = nm.empty(self.n_dof, dtype=r_vec.dtype)
    else:
        vec = vec[offset:offset + self.n_dof]
    eq_map = self.eq_map
    reduced = r_vec[r_offset:r_offset + eq_map.n_eq]
    # Essential BC values (possibly overridden by `force_value`).
    vec[eq_map.eq_ebc] = get_default(force_value, eq_map.val_ebc)
    # Values of the free (reduced) DOFs.
    vec[eq_map.eqi] = reduced
    # Periodic BCs: masters mirror their slaves.
    vec[eq_map.master] = vec[eq_map.slave]
    unused_dofs = self.field.get('unused_dofs')
    if unused_dofs is not None:
        vec[:] = self.field.restore_substituted(vec)
    return vec
def create_output(self, vec=None, key=None, extend=True, fill_value=None,
                  linearization=None):
    """
    Convert the DOF vector to a dictionary of output data usable by
    Mesh.write().

    Parameters
    ----------
    vec : array, optional
        An alternative DOF vector to be used instead of the variable
        DOF vector.
    key : str, optional
        The key to be used in the output dictionary instead of the
        variable name.
    extend : bool
        Extend the DOF values to cover the whole domain.
    fill_value : float or complex
        The value used to fill the missing DOF values if `extend` is True.
    linearization : Struct or None
        The linearization configuration for higher order approximations.
    """
    linearization = get_default(linearization, Struct(kind='strip'))
    if vec is None:
        vec = self()
    key = get_default(key, self.name)
    dpn = self.n_components
    # One row per node, one column per DOF component.
    nodal = nm.reshape(vec, (self.n_dof // dpn, dpn))
    return self.field.create_output(nodal, self.name, dof_names=self.dofs,
                                    key=key, extend=extend,
                                    fill_value=fill_value,
                                    linearization=linearization)
def get_element_diameters(self, cells, mode, square=False):
    """
    Get diameters of selected elements.

    Parameters
    ----------
    cells : array
        The selected elements.
    mode : int
        The diameter mode, passed through to
        `Domain.get_element_diameters()`.
    square : bool
        If True, return squared diameters (passed through).

    Returns
    -------
    diameters : array
        The diameters of the selected elements.
    """
    field = self.field
    domain = field.domain
    cells = nm.array(cells)
    # A first order integral suffices for the volume mapping.
    integral = Integral('i_tmp', 1)
    vg, _ = field.get_mapping(field.region, integral, 'volume')
    # Fix: the original pre-allocated `diameters` with nm.empty() and then
    # immediately overwrote it - the dead allocation is removed.
    diameters = domain.get_element_diameters(cells, vg, mode, square=square)
    return diameters
def save_as_mesh(self, filename):
    """
    Save the field mesh and the variable values into a file for
    visualization. Only the vertex values are stored.

    Parameters
    ----------
    filename : str
        The output file name, passed to `Mesh.write()`.
    """
    mesh = self.field.create_mesh(extra_nodes=False)
    vec = self()
    # Fix: the original also read `mesh.n_nod` into an unused local
    # (`n_nod`); the dead read is removed.
    dpn = self.n_components
    # One row per node, one column per DOF component.
    aux = nm.reshape(vec, (self.n_dof // dpn, dpn))
    ext = self.field.extend_dofs(aux, 0.0)
    out = {}
    if self.field.approx_order != 0:
        out[self.name] = Struct(name='output_data',
                                mode='vertex', data=ext,
                                var_name=self.name, dofs=self.dofs)
    else:
        # Cell-mode data are reshaped to 4D -- presumably
        # (n_cell, 1, n_components, 1); verify against Mesh.write().
        ext.shape = (ext.shape[0], 1, ext.shape[1], 1)
        out[self.name] = Struct(name='output_data',
                                mode='cell', data=ext,
                                var_name=self.name, dofs=self.dofs)
    mesh.write(filename, io='auto', out=out)
def has_same_mesh(self, other):
    """
    Compare the field mesh coordinates of `self` and `other`.

    Returns
    -------
    flag : str
        One of 'different' (different meshes), 'deformed' (slightly
        deformed same mesh), or 'same' (same mesh).
    """
    c1 = self.field.get_coor()
    c2 = other.field.get_coor()
    if c1.shape != c2.shape:
        return 'different'
    # Essentially exact match first, then a loose 10% relative tolerance.
    tight = 10.0 * nm.finfo(nm.float64).eps
    if nm.allclose(c1, c2, rtol=tight, atol=0.0):
        return 'same'
    if nm.allclose(c1, c2, rtol=0.1, atol=0.0):
        return 'deformed'
    return 'different'
def get_interp_coors(self, strategy='interpolation', interp_term=None):
    """
    Get the physical coordinates to interpolate into, based on the
    strategy used.
    """
    if strategy == 'interpolation':
        # Interpolate directly into the field DOF coordinates.
        return self.field.get_coor()
    if strategy == 'projection':
        # Project into the physical quadrature points of the term.
        integral = Integral(term=interp_term)
        return get_physical_qps(self.field.region, integral)
    raise ValueError('unknown interpolation strategy! (%s)' % strategy)
def evaluate_at(self, coors, mode='val', strategy='general',
                close_limit=0.1, get_cells_fun=None,
                cache=None, ret_cells=False,
                ret_status=False, ret_ref_coors=False, verbose=False):
    """
    Evaluate the variable in the given physical coordinates. Convenience
    wrapper around :func:`Field.evaluate_at()
    <sfepy.discrete.common.fields.Field.evaluate_at()>`, see its
    docstring for more details.
    """
    # Arrange the DOF vector as (node, component) rows.
    source_vals = self().reshape((self.n_nod, self.n_components))
    options = dict(mode=mode, strategy=strategy,
                   close_limit=close_limit, get_cells_fun=get_cells_fun,
                   cache=cache, ret_cells=ret_cells,
                   ret_status=ret_status, ret_ref_coors=ret_ref_coors,
                   verbose=verbose)
    return self.field.evaluate_at(coors, source_vals, **options)
def set_from_other(self, other, strategy='projection', close_limit=0.1):
    """
    Set the variable using another variable. Undefined values (e.g. outside
    the other mesh) are set to numpy.nan, or extrapolated.

    Parameters
    ----------
    strategy : 'projection' or 'interpolation'
        The strategy to set the values: the L^2 orthogonal projection (not
        implemented!), or a direct interpolation to the nodes (nodal
        elements only!)

    Notes
    -----
    If the other variable uses the same field mesh, the coefficients are
    set directly.
    """
    flag_same_mesh = self.has_same_mesh(other)
    if flag_same_mesh == 'same':
        # Same mesh - just copy the coefficients over.
        self.set_data(other())
        return
    if strategy == 'interpolation':
        coors = self.get_interp_coors(strategy)
    elif strategy == 'projection':
        # Fix: the original left `coors` undefined here and fell through
        # to other.evaluate_at(coors, ...), raising a confusing NameError
        # before its own NotImplementedError branch could be reached.
        raise NotImplementedError('unsupported strategy! (%s)' % strategy)
    else:
        raise ValueError('unknown interpolation strategy! (%s)' % strategy)
    vals = other.evaluate_at(coors, strategy='general',
                             close_limit=close_limit)
    self.set_data(vals)
class DGFieldVariable(FieldVariable):
    """
    Field variable specifically intended for use with DGFields; bypasses
    application of EBC and EPBC as this is done in DGField.
    Instances are type-checked in create_adof_conns.
    """
    def __init__(self, name, kind, field, order=None, primary_var_name=None,
                 special=None, flags=None, history=None, **kwargs):
        FieldVariable.__init__(self, name, kind, field, order=order,
                               primary_var_name=primary_var_name,
                               special=special, flags=flags,
                               history=history, **kwargs)
        # Local import -- presumably to avoid a circular import at module
        # load time; the check only runs at construction. TODO confirm.
        from sfepy.discrete.dg.fields import DGField
        if isinstance(field, DGField):
            pass
        else:
            raise ValueError("Attempted to use DGFieldVariable with non DGField!")
    def apply_ebc(self, vec, offset=0, force_values=None):
        # Intentionally a no-op: for DG fields the EBC application is
        # handled by DGField itself (see the class docstring).
        pass
    def get_full(self, r_vec, r_offset=0, force_value=None,
                 vec=None, offset=0):
        """
        Get the full DOF vector satisfying E(P)BCs from a reduced DOF
        vector.

        Notes
        -----
        The reduced vector starts in `r_vec` at `r_offset`.
        Passing a `force_value` overrides the EBC values. Optionally,
        `vec` argument can be provided to store the full vector (in
        place) starting at `offset`.
        """
        if vec is None:
            vec = nm.empty(self.n_dof, dtype=r_vec.dtype)
        else:
            vec = vec[offset:offset+self.n_dof]
        eq_map = self.eq_map
        r_vec = r_vec[r_offset:r_offset+eq_map.n_eq]
        # Override to hotfix a second application of EBCs -- the EBC line
        # of the base class is intentionally disabled here.
        # # EBC.
        # vec[eq_map.eq_ebc] = get_default(force_value, eq_map.val_ebc)
        # Reduced vector values; for DG this is the full vector, as
        # eq_map.eq contains all DOFs, cf. create_adof_conns.
        vec[eq_map.eqi] = r_vec
        # EPBC is likewise bypassed (handled in DGField).
        # vec[eq_map.master] = vec[eq_map.slave]
        unused_dofs = self.field.get('unused_dofs')
        if unused_dofs is not None:
            vec[:] = self.field.restore_substituted(vec)
        return vec
"sfepy.discrete.conditions.get_condition_value",
"numpy.ravel",
"numpy.empty",
"numpy.allclose",
"numpy.arange",
"sfepy.discrete.common.dof_info.is_active_bc",
"sfepy.base.base.assert_",
"sfepy.discrete.common.dof_info.DofInfo",
"six.iteritems",
"numpy.unique",
"collections.deque",
"sfepy.disc... | [((1762, 1787), 'sfepy.base.base.get_default', 'get_default', (['var_indx', '{}'], {}), '(var_indx, {})\n', (1773, 1787), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((3236, 3283), 'sfepy.base.base.iter_dict_of_lists', 'iter_dict_of_lists', (['conn_info'], {'return_keys': '(True)'}), '(conn_info, return_keys=True)\n', (3254, 3283), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((5260, 5323), 'numpy.zeros', 'nm.zeros', (['(basis.shape[:2] + (dpn, n_bf * dpn))'], {'dtype': 'nm.float64'}), '(basis.shape[:2] + (dpn, n_bf * dpn), dtype=nm.float64)\n', (5268, 5323), True, 'import numpy as nm\n'), ((5338, 5348), 'six.moves.range', 'range', (['n_c'], {}), '(n_c)\n', (5343, 5348), False, 'from six.moves import range\n'), ((3112, 3154), 'sfepy.base.base.output', 'output', (['"""setting up dof connectivities..."""'], {}), "('setting up dof connectivities...')\n", (3118, 3154), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((3171, 3188), 'sfepy.base.timing.Timer', 'Timer', ([], {'start': '(True)'}), '(start=True)\n', (3176, 3188), False, 'from sfepy.base.timing import Timer\n'), ((4523, 4540), 'numpy.take', 'nm.take', (['eq', 'conn'], {}), '(eq, conn)\n', (4530, 4540), True, 'import numpy as nm\n'), ((4666, 4712), 'numpy.empty', 'nm.empty', (['(n_el, n_ep * dpn)'], {'dtype': 'conn.dtype'}), '((n_el, n_ep * dpn), dtype=conn.dtype)\n', (4674, 4712), True, 'import numpy as nm\n'), ((4748, 4758), 'six.moves.range', 'range', (['dpn'], {}), '(dpn)\n', (4753, 4758), False, 'from six.moves import range\n'), ((5368, 5378), 'six.moves.range', 'range', (['dpn'], {}), '(dpn)\n', (5373, 5378), False, 'from six.moves import range\n'), ((5787, 
5806), 'six.iteritems', 'six.iteritems', (['conf'], {}), '(conf)\n', (5800, 5806), False, 'import six\n'), ((6716, 6752), 'sfepy.base.base.Container.__setitem__', 'Container.__setitem__', (['self', 'ii', 'var'], {}), '(self, ii, var)\n', (6737, 6752), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((9545, 9570), 'sfepy.discrete.common.dof_info.DofInfo', 'DofInfo', (['"""state_dof_info"""'], {}), "('state_dof_info')\n", (9552, 9570), False, 'from sfepy.discrete.common.dof_info import DofInfo, EquationMap, expand_nodes_to_equations, is_active_bc\n'), ((11122, 11171), 'sfepy.discrete.fem.lcbc_operators.LCBCOperators', 'LCBCOperators', (['"""lcbcs"""', 'self'], {'functions': 'functions'}), "('lcbcs', self, functions=functions)\n", (11135, 11171), False, 'from sfepy.discrete.fem.lcbc_operators import LCBCOperators\n'), ((14125, 14157), 'sfepy.discrete.common.dof_info.DofInfo', 'DofInfo', (['"""active_state_dof_info"""'], {}), "('active_state_dof_info')\n", (14132, 14157), False, 'from sfepy.discrete.common.dof_info import DofInfo, EquationMap, expand_nodes_to_equations, is_active_bc\n'), ((15812, 15837), 'six.iteritems', 'six.iteritems', (['adof_conns'], {}), '(adof_conns)\n', (15825, 15837), False, 'import six\n'), ((16123, 16169), 'numpy.zeros', 'nm.zeros', (['(self.di.ptr[-1],)'], {'dtype': 'self.dtype'}), '((self.di.ptr[-1],), dtype=self.dtype)\n', (16131, 16169), True, 'import numpy as nm\n'), ((16248, 16295), 'numpy.zeros', 'nm.zeros', (['(self.adi.ptr[-1],)'], {'dtype': 'self.dtype'}), '((self.adi.ptr[-1],), dtype=self.dtype)\n', (16256, 16295), True, 'import numpy as nm\n'), ((25060, 25082), 'six.iteritems', 'six.iteritems', (['di.indx'], {}), '(di.indx)\n', (25073, 25082), False, 'import six\n'), ((28314, 28356), 'sfepy.base.base.Struct.__init__', 'Struct.__init__', (['self'], {'name': 'name'}), '(self, name=name, **kwargs)\n', (28329, 28356), False, 
'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((33468, 33502), 'collections.deque', 'deque', (['((self.history + 1) * [None])'], {}), '((self.history + 1) * [None])\n', (33473, 33502), False, 'from collections import deque\n'), ((35488, 35529), 'numpy.empty', 'nm.empty', (['(self.n_dof,)'], {'dtype': 'self.dtype'}), '((self.n_dof,), dtype=self.dtype)\n', (35496, 35529), True, 'import numpy as nm\n'), ((40050, 40069), 'numpy.unique', 'nm.unique', (['nod_list'], {}), '(nod_list)\n', (40059, 40069), True, 'import numpy as nm\n'), ((41864, 41941), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""field_var_dof_details"""', 'n_nod': 'self.n_nod', 'dpn': 'self.n_components'}), "(name='field_var_dof_details', n_nod=self.n_nod, dpn=self.n_components)\n", (41870, 41941), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((44260, 44300), 'sfepy.discrete.common.dof_info.EquationMap', 'EquationMap', (['"""eq_map"""', 'self.dofs', 'var_di'], {}), "('eq_map', self.dofs, var_di)\n", (44271, 44300), False, 'from sfepy.discrete.common.dof_info import DofInfo, EquationMap, expand_nodes_to_equations, is_active_bc\n'), ((44848, 44898), 'numpy.zeros', 'nm.zeros', (['(di.n_dof[self.name],)'], {'dtype': 'self.dtype'}), '((di.n_dof[self.name],), dtype=self.dtype)\n', (44856, 44898), True, 'import numpy as nm\n'), ((47525, 47560), 'six.itervalues', 'six.itervalues', (['self.evaluate_cache'], {}), '(self.evaluate_cache)\n', (47539, 47560), False, 'import six\n'), ((52103, 52127), 'six.moves.range', 'range', (['self.n_components'], {}), '(self.n_components)\n', (52108, 52127), False, 'from six.moves import range\n'), ((55064, 55104), 'sfepy.base.base.get_default', 'get_default', (['force_value', 'eq_map.val_ebc'], {}), '(force_value, eq_map.val_ebc)\n', (55075, 55104), False, 
'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((56386, 56413), 'sfepy.base.base.get_default', 'get_default', (['key', 'self.name'], {}), '(key, self.name)\n', (56397, 56413), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((56429, 56498), 'numpy.reshape', 'nm.reshape', (['vec', '(self.n_dof // self.n_components, self.n_components)'], {}), '(vec, (self.n_dof // self.n_components, self.n_components))\n', (56439, 56498), True, 'import numpy as nm\n'), ((57003, 57018), 'numpy.array', 'nm.array', (['cells'], {}), '(cells)\n', (57011, 57018), True, 'import numpy as nm\n'), ((57040, 57085), 'numpy.empty', 'nm.empty', (['(cells.shape[0],)'], {'dtype': 'nm.float64'}), '((cells.shape[0],), dtype=nm.float64)\n', (57048, 57085), True, 'import numpy as nm\n'), ((57106, 57126), 'sfepy.discrete.integrals.Integral', 'Integral', (['"""i_tmp"""', '(1)'], {}), "('i_tmp', 1)\n", (57114, 57126), False, 'from sfepy.discrete.integrals import Integral\n'), ((57656, 57692), 'numpy.reshape', 'nm.reshape', (['vec', '(n_dof // dpn, dpn)'], {}), '(vec, (n_dof // dpn, dpn))\n', (57666, 57692), True, 'import numpy as nm\n'), ((1925, 1961), 'numpy.arange', 'nm.arange', (['var.n_dof'], {'dtype': 'nm.int32'}), '(var.n_dof, dtype=nm.int32)\n', (1934, 1961), True, 'import numpy as nm\n'), ((4561, 4608), 'numpy.asarray', 'nm.asarray', (['(offset * (aux >= 0))'], {'dtype': 'nm.int32'}), '(offset * (aux >= 0), dtype=nm.int32)\n', (4571, 4608), True, 'import numpy as nm\n'), ((4778, 4808), 'numpy.take', 'nm.take', (['eq', '(dpn * conn + idof)'], {}), '(eq, dpn * conn + idof)\n', (4785, 4808), True, 'import numpy as nm\n'), ((6046, 6067), 'sfepy.base.base.OneTypeList', 'OneTypeList', (['Variable'], {}), '(Variable)\n', (6057, 6067), False, 'from sfepy.base.base import real_types, 
complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((9716, 9743), 'sfepy.discrete.common.dof_info.DofInfo', 'DofInfo', (['"""virtual_dof_info"""'], {}), "('virtual_dof_info')\n", (9723, 9743), False, 'from sfepy.discrete.common.dof_info import DofInfo, EquationMap, expand_nodes_to_equations, is_active_bc\n'), ((14333, 14367), 'sfepy.discrete.common.dof_info.DofInfo', 'DofInfo', (['"""active_virtual_dof_info"""'], {}), "('active_virtual_dof_info')\n", (14340, 14367), False, 'from sfepy.discrete.common.dof_info import DofInfo, EquationMap, expand_nodes_to_equations, is_active_bc\n'), ((17438, 17485), 'numpy.empty', 'nm.empty', (['(self.adi.ptr[-1],)'], {'dtype': 'self.dtype'}), '((self.adi.ptr[-1],), dtype=self.dtype)\n', (17446, 17485), True, 'import numpy as nm\n'), ((23008, 23027), 'six.iteritems', 'six.iteritems', (['data'], {}), '(data)\n', (23021, 23027), False, 'import six\n'), ((25944, 25975), 'sfepy.base.base.output', 'output', (['"""updating variables..."""'], {}), "('updating variables...')\n", (25950, 25975), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((26078, 26095), 'sfepy.base.base.output', 'output', (['"""...done"""'], {}), "('...done')\n", (26084, 26095), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((28784, 28791), 'collections.deque', 'deque', ([], {}), '()\n', (28789, 28791), False, 'from collections import deque\n'), ((34168, 34194), 'six.moves.range', 'range', (['self.history', '(0)', '(-1)'], {}), '(self.history, 0, -1)\n', (34173, 34194), False, 'from six.moves import range\n'), ((34425, 34460), 'six.itervalues', 'six.itervalues', (['self.evaluate_cache'], {}), '(self.evaluate_cache)\n', (34439, 34460), False, 'import six\n'), ((35284, 35325), 'numpy.zeros', 
'nm.zeros', (['(self.n_dof,)'], {'dtype': 'self.dtype'}), '((self.n_dof,), dtype=self.dtype)\n', (35292, 35325), True, 'import numpy as nm\n'), ((38274, 38298), 'sfepy.base.base.get_default', 'get_default', (['dt', 'self.dt'], {}), '(dt, self.dt)\n', (38285, 38298), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((42604, 42663), 'sfepy.base.base.output', 'output', (["('data of %s set by %s()' % (self.name, setter.name))"], {}), "('data of %s set by %s()' % (self.name, setter.name))\n", (42610, 42663), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((45366, 45416), 'sfepy.discrete.conditions.get_condition_value', 'get_condition_value', (['val', 'functions', '"""IC"""', 'ic.name'], {}), "(val, functions, 'IC', ic.name)\n", (45385, 45416), False, 'from sfepy.discrete.conditions import get_condition_value\n'), ((45635, 45683), 'sfepy.discrete.common.dof_info.expand_nodes_to_equations', 'expand_nodes_to_equations', (['nods', 'dofs', 'self.dofs'], {}), '(nods, dofs, self.dofs)\n', (45660, 45683), False, 'from sfepy.discrete.common.dof_info import DofInfo, EquationMap, expand_nodes_to_equations, is_active_bc\n'), ((45726, 45738), 'numpy.ravel', 'nm.ravel', (['vv'], {}), '(vv)\n', (45734, 45738), True, 'import numpy as nm\n'), ((50338, 50358), 'sfepy.discrete.integrals.Integral', 'Integral', (['"""aux_1"""', '(1)'], {}), "('aux_1', 1)\n", (50346, 50358), False, 'from sfepy.discrete.integrals import Integral\n'), ((54247, 54295), 'sfepy.linalg.assemble1d', 'la.assemble1d', (['r_vec', 'slave[ii]', 'vec[master[ii]]'], {}), '(r_vec, slave[ii], vec[master[ii]])\n', (54260, 54295), True, 'import sfepy.linalg as la\n'), ((54833, 54872), 'numpy.empty', 'nm.empty', (['self.n_dof'], {'dtype': 'r_vec.dtype'}), '(self.n_dof, dtype=r_vec.dtype)\n', (54841, 54872), 
True, 'import numpy as nm\n'), ((56299, 56319), 'sfepy.base.base.Struct', 'Struct', ([], {'kind': '"""strip"""'}), "(kind='strip')\n", (56305, 56319), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((57829, 57920), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""vertex"""', 'data': 'ext', 'var_name': 'self.name', 'dofs': 'self.dofs'}), "(name='output_data', mode='vertex', data=ext, var_name=self.name,\n dofs=self.dofs)\n", (57835, 57920), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((58091, 58181), 'sfepy.base.base.Struct', 'Struct', ([], {'name': '"""output_data"""', 'mode': '"""cell"""', 'data': 'ext', 'var_name': 'self.name', 'dofs': 'self.dofs'}), "(name='output_data', mode='cell', data=ext, var_name=self.name, dofs=\n self.dofs)\n", (58097, 58181), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((58800, 58839), 'numpy.allclose', 'nm.allclose', (['c1', 'c2'], {'rtol': 'eps', 'atol': '(0.0)'}), '(c1, c2, rtol=eps, atol=0.0)\n', (58811, 58839), True, 'import numpy as nm\n'), ((63766, 63805), 'numpy.empty', 'nm.empty', (['self.n_dof'], {'dtype': 'r_vec.dtype'}), '(self.n_dof, dtype=r_vec.dtype)\n', (63774, 63805), True, 'import numpy as nm\n'), ((2047, 2083), 'numpy.arange', 'nm.arange', (['var.n_dof'], {'dtype': 'nm.int32'}), '(var.n_dof, dtype=nm.int32)\n', (2056, 2083), True, 'import numpy as nm\n'), ((4852, 4899), 'numpy.asarray', 'nm.asarray', (['(offset * (aux >= 0))'], {'dtype': 'nm.int32'}), '(offset * (aux >= 0), dtype=nm.int32)\n', (4862, 4899), True, 'import numpy as nm\n'), ((11518, 11542), 'sfepy.base.base.output', 'output', (['"""lcbc:"""', 'bc.name'], {}), "('lcbc:', bc.name)\n", 
(11524, 11542), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((15444, 15505), 'sfepy.base.base.output', 'output', (["('IC data of %s set by %s()' % (var.name, setter.name))"], {}), "('IC data of %s set by %s()' % (var.name, setter.name))\n", (15450, 15505), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((19482, 19542), 'numpy.allclose', 'nm.allclose', (['vec[i0 + eq_map.master]', 'vec[i0 + eq_map.slave]'], {}), '(vec[i0 + eq_map.master], vec[i0 + eq_map.slave])\n', (19493, 19542), True, 'import numpy as nm\n'), ((26880, 26901), 'sfepy.base.base.assert_', 'assert_', (['(history >= 0)'], {}), '(history >= 0)\n', (26887, 26901), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((27330, 27369), 'sfepy.base.base.get_default', 'get_default', (['conf.like', '"""(set-to-None)"""'], {}), "(conf.like, '(set-to-None)')\n", (27341, 27369), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((29898, 29938), 'sfepy.base.base.get_default', 'get_default', (['primary_var_name', 'None', 'msg'], {}), '(primary_var_name, None, msg)\n', (29909, 29938), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((30996, 31020), 'six.moves.range', 'range', (['self.n_components'], {}), '(self.n_components)\n', (31001, 31020), False, 'from six.moves import range\n'), ((51090, 51132), 'sfepy.discrete.evaluate_variable.eval_real', 'eval_real', (['vec', 'conn', 'geo', 'mode', 'shape', 'bf'], {}), '(vec, conn, geo, mode, shape, bf)\n', (51099, 51132), False, 'from 
sfepy.discrete.evaluate_variable import eval_real, eval_complex\n'), ((51174, 51219), 'sfepy.discrete.evaluate_variable.eval_complex', 'eval_complex', (['vec', 'conn', 'geo', 'mode', 'shape', 'bf'], {}), '(vec, conn, geo, mode, shape, bf)\n', (51186, 51219), False, 'from sfepy.discrete.evaluate_variable import eval_real, eval_complex\n'), ((58889, 58928), 'numpy.allclose', 'nm.allclose', (['c1', 'c2'], {'rtol': '(0.1)', 'atol': '(0.0)'}), '(c1, c2, rtol=0.1, atol=0.0)\n', (58900, 58928), True, 'import numpy as nm\n'), ((59418, 59444), 'sfepy.discrete.integrals.Integral', 'Integral', ([], {'term': 'interp_term'}), '(term=interp_term)\n', (59426, 59444), False, 'from sfepy.discrete.integrals import Integral\n'), ((59465, 59499), 'sfepy.discrete.common.mappings.get_physical_qps', 'get_physical_qps', (['region', 'integral'], {}), '(region, integral)\n', (59481, 59499), False, 'from sfepy.discrete.common.mappings import get_physical_qps\n'), ((2220, 2256), 'numpy.arange', 'nm.arange', (['var.n_dof'], {'dtype': 'nm.int32'}), '(var.n_dof, dtype=nm.int32)\n', (2229, 2256), True, 'import numpy as nm\n'), ((11426, 11470), 'sfepy.discrete.common.dof_info.is_active_bc', 'is_active_bc', (['bc'], {'ts': 'ts', 'functions': 'functions'}), '(bc, ts=ts, functions=functions)\n', (11438, 11470), False, 'from sfepy.discrete.common.dof_info import DofInfo, EquationMap, expand_nodes_to_equations, is_active_bc\n'), ((19071, 19107), 'numpy.allclose', 'nm.allclose', (['vec[ii]', 'eq_map.val_ebc'], {}), '(vec[ii], eq_map.val_ebc)\n', (19082, 19107), True, 'import numpy as nm\n'), ((30180, 30220), 'sfepy.base.base.get_default', 'get_default', (['primary_var_name', 'None', 'msg'], {}), '(primary_var_name, None, msg)\n', (30191, 30220), False, 'from sfepy.base.base import real_types, complex_types, assert_, get_default, output, OneTypeList, Container, Struct, basestr, iter_dict_of_lists\n'), ((34274, 34301), 'numpy.empty_like', 'nm.empty_like', (['self.data[0]'], {}), '(self.data[0])\n', 
(34287, 34301), True, 'import numpy as nm\n'), ((58759, 58779), 'numpy.finfo', 'nm.finfo', (['nm.float64'], {}), '(nm.float64)\n', (58767, 58779), True, 'import numpy as nm\n'), ((19238, 19282), 'numpy.allclose', 'nm.allclose', (['vec[ii]', 'force_values[var_name]'], {}), '(vec[ii], force_values[var_name])\n', (19249, 19282), True, 'import numpy as nm\n'), ((19370, 19404), 'numpy.allclose', 'nm.allclose', (['vec[ii]', 'force_values'], {}), '(vec[ii], force_values)\n', (19381, 19404), True, 'import numpy as nm\n'), ((34732, 34763), 'six.iteritems', 'six.iteritems', (['step_cache[step]'], {}), '(step_cache[step])\n', (34745, 34763), False, 'import six\n'), ((10918, 10950), 'sfepy.discrete.common.region.are_disjoint', 'are_disjoint', (['regs[i0]', 'regs[i1]'], {}), '(regs[i0], regs[i1])\n', (10930, 10950), False, 'from sfepy.discrete.common.region import are_disjoint\n')] |
#!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# <NAME>, U.S. Geological Survey
# <NAME>, GNS Science
# <NAME>, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file tests/libtests/feassemble/data/IntegratorInertia3DLinear.py
## @brief Python application for generating C++ data files for testing
## C++ IntegratorInertia object with 3-D cell and linear basis
## functions.
from IntegratorInertia import IntegratorInertia
import numpy
# ----------------------------------------------------------------------
# IntegratorInertia3DLinear class
class IntegratorInertia3DLinear(IntegratorInertia):
    """
    Python application for generating C++ data files for testing C++
    IntegratorInertia object with 3-D cell and linear basis functions.
    """

    # PUBLIC METHODS /////////////////////////////////////////////////////

    def __init__(self, name="integratorinertia3dlinear"):
        """
        Constructor.
        """
        IntegratorInertia.__init__(self, name)

        from Quadrature3DLinear import Quadrature3DLinear
        self.quadrature = Quadrature3DLinear()

        # A single cell with four vertices and three DOF per vertex.
        self.numVertices = 4
        self.numCells = 1
        self.fiberDim = 3

        vertex_coords = [[-0.5, -1.0, -0.5],
                         [2.0, -0.5, -0.4],
                         [1.0, -0.1, -0.3],
                         [-0.2, 0.5, 2.0]]
        self.vertices = numpy.array(vertex_coords, dtype=numpy.float64)
        self.cells = numpy.array([[0, 1, 2, 3]], dtype=numpy.int32)

        field_values = [[1.2], [0.1], [-0.3],
                        [0.2], [-0.8], [1.2],
                        [1.3], [-0.2], [1.7],
                        [1.1], [1.4], [0.9]]
        self.fieldIn = numpy.array(field_values, dtype=numpy.float64)
        return
# MAIN /////////////////////////////////////////////////////////////////
if __name__ == "__main__":
    # Generate the C++ data files when this module is run as a script.
    app = IntegratorInertia3DLinear()
    app.run()

# End of file
| [
"IntegratorInertia.IntegratorInertia.__init__",
"Quadrature3DLinear.Quadrature3DLinear",
"numpy.array"
] | [((1243, 1281), 'IntegratorInertia.IntegratorInertia.__init__', 'IntegratorInertia.__init__', (['self', 'name'], {}), '(self, name)\n', (1269, 1281), False, 'from IntegratorInertia import IntegratorInertia\n'), ((1359, 1379), 'Quadrature3DLinear.Quadrature3DLinear', 'Quadrature3DLinear', ([], {}), '()\n', (1377, 1379), False, 'from Quadrature3DLinear import Quadrature3DLinear\n'), ((1479, 1594), 'numpy.array', 'numpy.array', (['[[-0.5, -1.0, -0.5], [2.0, -0.5, -0.4], [1.0, -0.1, -0.3], [-0.2, 0.5, 2.0]]'], {'dtype': 'numpy.float64'}), '([[-0.5, -1.0, -0.5], [2.0, -0.5, -0.4], [1.0, -0.1, -0.3], [-\n 0.2, 0.5, 2.0]], dtype=numpy.float64)\n', (1490, 1594), False, 'import numpy\n'), ((1714, 1760), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 3]]'], {'dtype': 'numpy.int32'}), '([[0, 1, 2, 3]], dtype=numpy.int32)\n', (1725, 1760), False, 'import numpy\n'), ((1781, 1907), 'numpy.array', 'numpy.array', (['[[1.2], [0.1], [-0.3], [0.2], [-0.8], [1.2], [1.3], [-0.2], [1.7], [1.1], [\n 1.4], [0.9]]'], {'dtype': 'numpy.float64'}), '([[1.2], [0.1], [-0.3], [0.2], [-0.8], [1.2], [1.3], [-0.2], [\n 1.7], [1.1], [1.4], [0.9]], dtype=numpy.float64)\n', (1792, 1907), False, 'import numpy\n')] |
"""
Parametrized surfaces using a CoordinateMap
"""
import numpy as np
from nose.tools import assert_equal
from nipy.core.api import CoordinateMap, CoordinateSystem
from nipy.core.api import Grid
# Input (parameter) and output (embedding) coordinate systems for the surface map.
uv = CoordinateSystem('uv', 'input')
xyz = CoordinateSystem('xyz', 'output')
def parametric_mapping(vals):
    """Map (u, v) parameter pairs onto the surface x**2 - y**2*z**2 + z**3 = 0.

    Parameters
    ----------
    vals : array-like, shape (N, 2)
        First column holds u values, second column holds v values.

    Returns
    -------
    ndarray, shape (N, 3)
        The surface points (x, y, z) = (v*(u**2 - v**2), u, u**2 - v**2).
    """
    u = vals[:, 0]
    v = vals[:, 1]
    diff = u ** 2 - v ** 2
    return np.array([v * diff, u, diff]).T
"""
Let's check that indeed this is a parametrization of that surface
"""
def implicit(vals):
    """Evaluate the implicit surface equation x**2 - y**2*z**2 + z**3 row-wise.

    *vals* is an (N, 3) array of (x, y, z) points; the result is zero for
    points lying exactly on the surface.
    """
    x, y, z = vals[:, 0], vals[:, 1], vals[:, 2]
    return x ** 2 - y ** 2 * z ** 2 + z ** 3
surface_param = CoordinateMap(parametric_mapping, uv, xyz)
def test_surface():
    """Points from the parametrization must satisfy the implicit equation."""
    params = np.random.standard_normal((40, 2))
    residuals = implicit(parametric_mapping(params))
    assert np.allclose(residuals, 0)
def test_grid():
    """Sampling the surface on a 201x101 (u, v) grid yields 201x101 coords."""
    sampler = Grid(surface_param)
    xyz_grid = sampler[-1:1:201j, -1:1:101j]
    # transposed_values is expected to give the three coordinate arrays.
    for coord in xyz_grid.transposed_values:
        yield assert_equal, coord.shape, (201, 101)
def test_grid32():
    """A float32 input/output coordinate map should yield float32 grids."""
    uv32 = CoordinateSystem('uv', 'input', np.float32)
    xyz32 = CoordinateSystem('xyz', 'output', np.float32)
    surface32 = CoordinateMap(parametric_mapping, uv32, xyz32)
    xyz_grid = Grid(surface32)[-1:1:201j, -1:1:101j]
    x, y, z = xyz_grid.transposed_values
    for coord in (x, y, z):
        yield assert_equal, coord.shape, (201, 101)
    yield assert_equal, x.dtype, np.dtype(np.float32)
def test_grid32_c128():
    """float32 input coordinates can feed a complex128 output mapping."""
    uv32 = CoordinateSystem('uv', 'input', np.float32)
    xyz128 = CoordinateSystem('xyz', 'output', np.complex128)

    def par_c128(x):
        # Promote the real-valued parametrization to complex128.
        return parametric_mapping(x).astype(np.complex128)

    surface = CoordinateMap(par_c128, uv32, xyz128)
    xyz_grid = Grid(surface)[-1:1:201j, -1:1:101j]
    x, y, z = xyz_grid.transposed_values
    for coord in (x, y, z):
        yield assert_equal, coord.shape, (201, 101)
    yield assert_equal, x.dtype, np.dtype(np.complex128)
def view_surface():
    """Render the parametrized surface with Mayavi (interactive helper, not a test)."""
    from enthought.mayavi import mlab
    g = Grid(surface_param)
    xyz_grid = g[-1:1:201j,-1:1:101j]
    x, y, z = xyz_grid.transposed_values
    mlab.mesh(x, y, z)
    mlab.draw()
| [
"nipy.core.api.CoordinateMap",
"enthought.mayavi.mlab.draw",
"nipy.core.api.Grid",
"enthought.mayavi.mlab.mesh",
"numpy.dtype",
"nipy.core.api.CoordinateSystem",
"numpy.random.standard_normal",
"numpy.array"
] | [((204, 235), 'nipy.core.api.CoordinateSystem', 'CoordinateSystem', (['"""uv"""', '"""input"""'], {}), "('uv', 'input')\n", (220, 235), False, 'from nipy.core.api import CoordinateMap, CoordinateSystem\n'), ((242, 275), 'nipy.core.api.CoordinateSystem', 'CoordinateSystem', (['"""xyz"""', '"""output"""'], {}), "('xyz', 'output')\n", (258, 275), False, 'from nipy.core.api import CoordinateMap, CoordinateSystem\n'), ((708, 750), 'nipy.core.api.CoordinateMap', 'CoordinateMap', (['parametric_mapping', 'uv', 'xyz'], {}), '(parametric_mapping, uv, xyz)\n', (721, 750), False, 'from nipy.core.api import CoordinateMap, CoordinateSystem\n'), ((967, 986), 'nipy.core.api.Grid', 'Grid', (['surface_param'], {}), '(surface_param)\n', (971, 986), False, 'from nipy.core.api import Grid\n'), ((1281, 1324), 'nipy.core.api.CoordinateSystem', 'CoordinateSystem', (['"""uv"""', '"""input"""', 'np.float32'], {}), "('uv', 'input', np.float32)\n", (1297, 1324), False, 'from nipy.core.api import CoordinateMap, CoordinateSystem\n'), ((1337, 1382), 'nipy.core.api.CoordinateSystem', 'CoordinateSystem', (['"""xyz"""', '"""output"""', 'np.float32'], {}), "('xyz', 'output', np.float32)\n", (1353, 1382), False, 'from nipy.core.api import CoordinateMap, CoordinateSystem\n'), ((1399, 1445), 'nipy.core.api.CoordinateMap', 'CoordinateMap', (['parametric_mapping', 'uv32', 'xyz32'], {}), '(parametric_mapping, uv32, xyz32)\n', (1412, 1445), False, 'from nipy.core.api import CoordinateMap, CoordinateSystem\n'), ((1454, 1469), 'nipy.core.api.Grid', 'Grid', (['surface32'], {}), '(surface32)\n', (1458, 1469), False, 'from nipy.core.api import Grid\n'), ((1834, 1877), 'nipy.core.api.CoordinateSystem', 'CoordinateSystem', (['"""uv"""', '"""input"""', 'np.float32'], {}), "('uv', 'input', np.float32)\n", (1850, 1877), False, 'from nipy.core.api import CoordinateMap, CoordinateSystem\n'), ((1891, 1939), 'nipy.core.api.CoordinateSystem', 'CoordinateSystem', (['"""xyz"""', '"""output"""', 'np.complex128'], {}), 
"('xyz', 'output', np.complex128)\n", (1907, 1939), False, 'from nipy.core.api import CoordinateMap, CoordinateSystem\n'), ((2034, 2071), 'nipy.core.api.CoordinateMap', 'CoordinateMap', (['par_c128', 'uv32', 'xyz128'], {}), '(par_c128, uv32, xyz128)\n', (2047, 2071), False, 'from nipy.core.api import CoordinateMap, CoordinateSystem\n'), ((2080, 2093), 'nipy.core.api.Grid', 'Grid', (['surface'], {}), '(surface)\n', (2084, 2093), False, 'from nipy.core.api import Grid\n'), ((2427, 2446), 'nipy.core.api.Grid', 'Grid', (['surface_param'], {}), '(surface_param)\n', (2431, 2446), False, 'from nipy.core.api import Grid\n'), ((2530, 2548), 'enthought.mayavi.mlab.mesh', 'mlab.mesh', (['x', 'y', 'z'], {}), '(x, y, z)\n', (2539, 2548), False, 'from enthought.mayavi import mlab\n'), ((2553, 2564), 'enthought.mayavi.mlab.draw', 'mlab.draw', ([], {}), '()\n', (2562, 2564), False, 'from enthought.mayavi import mlab\n'), ((425, 478), 'numpy.array', 'np.array', (['[v * (u ** 2 - v ** 2), u, u ** 2 - v ** 2]'], {}), '([v * (u ** 2 - v ** 2), u, u ** 2 - v ** 2])\n', (433, 478), True, 'import numpy as np\n'), ((1711, 1731), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (1719, 1731), True, 'import numpy as np\n'), ((2335, 2358), 'numpy.dtype', 'np.dtype', (['np.complex128'], {}), '(np.complex128)\n', (2343, 2358), True, 'import numpy as np\n'), ((862, 896), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(40, 2)'], {}), '((40, 2))\n', (887, 896), True, 'import numpy as np\n')] |
import os
import imageio
import argparse
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import utils
def draw_distributions(filename, save_dir, type='mean', node_no=0, save_plots=False, plot_time=0.5):
    """Animate the per-recording distribution of one node's statistics.

    For each recorded step, draws a seaborn distplot of the chosen statistic
    for node *node_no* and optionally collects the frames into a GIF.

    Parameters
    ----------
    filename : str
        Path to a log file readable by ``utils.get_file_info`` /
        ``utils.load_mean_std_from_file``.
    save_dir : str
        Directory (ending in '/') where the GIF is written when *save_plots*.
    type : str
        'mean', 'std', or 'both'. With 'both' a normal distribution is
        sampled from the recorded mean/std instead of plotting raw values.
    node_no : int
        Index of the node (flattened feature) to visualize.
    save_plots : bool
        If True, buffer each rendered frame and save an animated GIF.
    plot_time : float
        Seconds per GIF frame (fps = 1/plot_time).
    """
    file_desc = utils.get_file_info(filename)
    layer = file_desc['layer_name']
    batch_size = file_desc['batch_size']
    # Number of recordings per epoch; used to translate index -> (epoch, iteration).
    freq = file_desc['recording_frequency_per_epoch']
    means, stds = utils.load_mean_std_from_file(filename)
    frames = []
    if type=='both':
        fig = plt.figure()
        ax = fig.add_subplot(111)
        for i in range(len(means)):
            # Batch-average mean for this node at recording i.
            mean = np.mean(means[i].reshape((batch_size, -1))[:, node_no])
            # NOTE(review): this is the mean of squared stds (a variance-like
            # quantity), yet it is passed as `scale` below -- confirm intent.
            std = np.sum(np.square(stds[i].reshape((batch_size, -1))[:, node_no])) / batch_size
            # Plot a synthetic normal with the recorded moments.
            sns.distplot(np.random.normal(loc=mean, scale=std, size=1000), ax=ax, hist=False)
            ax.axvline(mean, color='r', linestyle='-')
            iteration = i % freq
            epoch = i // freq
            plt.title(f'Distribution for {layer} node {node_no}: Epoch-{epoch} Iteration-{iteration}')
            plt.xlabel(f'Value')
            plt.ylabel('Density')
            fig.canvas.draw()
            if save_plots:
                # Grab the rendered RGBA buffer as a GIF frame.
                frame = np.array(fig.canvas.renderer.buffer_rgba())
                frames.append(frame)
            plt.pause(0.1)
            ax.clear()
        plt.close()
    else:
        # Plot the raw per-sample values of the chosen statistic.
        data = means if type=='mean' else stds
        fig = plt.figure()
        ax = fig.add_subplot(111)
        for i in range(len(data)):
            sample = data[i].reshape((batch_size, -1))
            sample = sample[:, node_no]
            sns.distplot(sample, norm_hist=True, ax=ax)
            ax.axvline(np.mean(sample), color='r', linestyle='-')
            iteration = i % freq
            epoch = i // freq
            plt.title(f'Distribution for {layer} node {node_no}: Epoch-{epoch} Iteration-{iteration}')
            plt.xlabel(f'Value of {type}')
            plt.ylabel('Density')
            fig.canvas.draw()
            if save_plots:
                frame = np.array(fig.canvas.renderer.buffer_rgba())
                frames.append(frame)
            plt.pause(0.1)
            ax.clear()
        plt.close()
    if save_plots:
        imageio.mimsave(save_dir + f'{layer}-node_{node_no}-{type}-distplot.gif', frames, fps=1/plot_time)
def draw_lineplot(filename, save_dir, type='mean', node_no=0, save_plots=False, plot_time=5):
    """Plot the batch-averaged mean/std of one node across training.

    Parameters
    ----------
    filename : str
        Path to a log file readable by ``utils.get_file_info`` /
        ``utils.load_mean_std_from_file``.
    save_dir : str
        Directory (ending in '/') where the JPG is written when *save_plots*.
    type : str
        Which statistic to plot: 'mean' or 'std'.
    node_no : int
        Index of the node (flattened feature) to visualize.
    save_plots : bool
        If True, save the figure as a JPG next to the other plots.
    plot_time : float
        Seconds to keep the (non-blocking) figure on screen.
    """
    file_desc = utils.get_file_info(filename)
    layer = file_desc['layer_name']
    # BUG FIX: `freq` was used below without ever being assigned in this
    # function (NameError at runtime). Fetch the recording frequency from the
    # file description, exactly as draw_distributions does.
    freq = file_desc['recording_frequency_per_epoch']
    all_means, all_stds = utils.load_mean_std_from_file(filename)
    data = all_means if type == 'mean' else all_stds
    # Collapse each recording to the batch-average value of the chosen node.
    node_values = []
    for i in range(len(data)):
        sample = data[i].reshape((file_desc['batch_size'], -1))
        node_values.append(np.mean(sample[:, node_no]))
    # One x-tick per recording, in fractional-epoch units.
    x = np.arange(0, file_desc['number_of_epochs'], 1 / freq)
    sns.lineplot(x, node_values)
    plt.title(f'Average value of {type} for node {node_no} of {layer}')
    plt.xlabel('Epoch Number')
    plt.ylabel(f'Average {type}s')
    plt.show(block=False)
    plt.pause(plot_time)
    if save_plots:
        plt.savefig(save_dir + f'{layer}-node_{node_no}-{type}-lineplot.jpg')
if __name__ == '__main__':
    # Command-line entry point: choose statistic, node, and plot style.
    parser = argparse.ArgumentParser(description = "Visualize Mean and Variance")
    parser.add_argument('--filename', type=str, help='path to log file', required=True)
    parser.add_argument('--data_type', default='mean', type=str, help='Draw plots for what? mean or std?')
    parser.add_argument('--node_no', default=0, type=int, help='Draw plots for which node?')
    parser.add_argument('--plot_type', default='both', type=str, help='Which plot to draw? lineplot or distplot?')
    parser.add_argument('--plot_time', default=1, type=float, help='Pause the plot for how much time?')
    parser.add_argument('--save_plots', default=0, type=int, help='Save plots? 0 (No) or 1 (Yes)')
    parser.add_argument('--save_dir', default='', type=str, help='Save plots to which directory?(End with a /)')
    args = parser.parse_args()
    save_dir = None
    if args.save_plots:
        # Default the save directory to <logfile dir>/plots/ when none given.
        save_dir = None if args.save_dir=='' else args.save_dir
        if not save_dir:
            save_dir = "/".join(args.filename.split("/")[:-1]) + '/plots/'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir, exist_ok=True)
    # Dispatch to the requested plot style.
    if args.plot_type=='lineplot':
        draw_lineplot(args.filename, save_dir, args.data_type, args.node_no, bool(args.save_plots), args.plot_time)
    elif args.plot_type=='distplot':
        draw_distributions(args.filename, save_dir, args.data_type, args.node_no, bool(args.save_plots), args.plot_time)
    else:
raise NotImplementedError | [
"matplotlib.pyplot.title",
"seaborn.lineplot",
"argparse.ArgumentParser",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.random.normal",
"utils.load_mean_std_from_file",
"imageio.mimsave",
"utils.get_file_info",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.pyplo... | [((247, 276), 'utils.get_file_info', 'utils.get_file_info', (['filename'], {}), '(filename)\n', (266, 276), False, 'import utils\n'), ((426, 465), 'utils.load_mean_std_from_file', 'utils.load_mean_std_from_file', (['filename'], {}), '(filename)\n', (455, 465), False, 'import utils\n'), ((2471, 2500), 'utils.get_file_info', 'utils.get_file_info', (['filename'], {}), '(filename)\n', (2490, 2500), False, 'import utils\n'), ((2555, 2594), 'utils.load_mean_std_from_file', 'utils.load_mean_std_from_file', (['filename'], {}), '(filename)\n', (2584, 2594), False, 'import utils\n'), ((2879, 2901), 'seaborn.lineplot', 'sns.lineplot', (['x', 'means'], {}), '(x, means)\n', (2891, 2901), True, 'import seaborn as sns\n'), ((2906, 2973), 'matplotlib.pyplot.title', 'plt.title', (['f"""Average value of {type} for node {node_no} of {layer}"""'], {}), "(f'Average value of {type} for node {node_no} of {layer}')\n", (2915, 2973), True, 'import matplotlib.pyplot as plt\n'), ((2978, 3004), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch Number"""'], {}), "('Epoch Number')\n", (2988, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3039), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""Average {type}s"""'], {}), "(f'Average {type}s')\n", (3019, 3039), True, 'import matplotlib.pyplot as plt\n'), ((3044, 3065), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3052, 3065), True, 'import matplotlib.pyplot as plt\n'), ((3070, 3090), 'matplotlib.pyplot.pause', 'plt.pause', (['plot_time'], {}), '(plot_time)\n', (3079, 3090), True, 'import matplotlib.pyplot as plt\n'), ((3229, 3295), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Visualize Mean and Variance"""'}), "(description='Visualize Mean and Variance')\n", (3252, 3295), False, 'import argparse\n'), ((517, 529), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (527, 529), True, 'import matplotlib.pyplot as plt\n'), ((1374, 
1385), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1383, 1385), True, 'import matplotlib.pyplot as plt\n'), ((1457, 1469), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1467, 1469), True, 'import matplotlib.pyplot as plt\n'), ((2220, 2231), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2229, 2231), True, 'import matplotlib.pyplot as plt\n'), ((2260, 2364), 'imageio.mimsave', 'imageio.mimsave', (["(save_dir + f'{layer}-node_{node_no}-{type}-distplot.gif')", 'frames'], {'fps': '(1 / plot_time)'}), "(save_dir + f'{layer}-node_{node_no}-{type}-distplot.gif',\n frames, fps=1 / plot_time)\n", (2275, 2364), False, 'import imageio\n'), ((3118, 3187), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_dir + f'{layer}-node_{node_no}-{type}-lineplot.jpg')"], {}), "(save_dir + f'{layer}-node_{node_no}-{type}-lineplot.jpg')\n", (3129, 3187), True, 'import matplotlib.pyplot as plt\n'), ((996, 1096), 'matplotlib.pyplot.title', 'plt.title', (['f"""Distribution for {layer} node {node_no}: Epoch-{epoch} Iteration-{iteration}"""'], {}), "(\n f'Distribution for {layer} node {node_no}: Epoch-{epoch} Iteration-{iteration}'\n )\n", (1005, 1096), True, 'import matplotlib.pyplot as plt\n'), ((1099, 1119), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Value"""'], {}), "(f'Value')\n", (1109, 1119), True, 'import matplotlib.pyplot as plt\n'), ((1132, 1153), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (1142, 1153), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1342), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (1337, 1342), True, 'import matplotlib.pyplot as plt\n'), ((1647, 1690), 'seaborn.distplot', 'sns.distplot', (['sample'], {'norm_hist': '(True)', 'ax': 'ax'}), '(sample, norm_hist=True, ax=ax)\n', (1659, 1690), True, 'import seaborn as sns\n'), ((1832, 1932), 'matplotlib.pyplot.title', 'plt.title', (['f"""Distribution for {layer} node {node_no}: Epoch-{epoch} 
Iteration-{iteration}"""'], {}), "(\n f'Distribution for {layer} node {node_no}: Epoch-{epoch} Iteration-{iteration}'\n )\n", (1841, 1932), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1965), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Value of {type}"""'], {}), "(f'Value of {type}')\n", (1945, 1965), True, 'import matplotlib.pyplot as plt\n'), ((1978, 1999), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Density"""'], {}), "('Density')\n", (1988, 1999), True, 'import matplotlib.pyplot as plt\n'), ((2174, 2188), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (2183, 2188), True, 'import matplotlib.pyplot as plt\n'), ((2770, 2797), 'numpy.mean', 'np.mean', (['sample[:, node_no]'], {}), '(sample[:, node_no])\n', (2777, 2797), True, 'import numpy as np\n'), ((2819, 2872), 'numpy.arange', 'np.arange', (['(0)', "file_desc['number_of_epochs']", '(1 / freq)'], {}), "(0, file_desc['number_of_epochs'], 1 / freq)\n", (2828, 2872), True, 'import numpy as np\n'), ((4272, 4296), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4286, 4296), False, 'import os\n'), ((4310, 4346), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (4321, 4346), False, 'import os\n'), ((797, 845), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'mean', 'scale': 'std', 'size': '(1000)'}), '(loc=mean, scale=std, size=1000)\n', (813, 845), True, 'import numpy as np\n'), ((1714, 1729), 'numpy.mean', 'np.mean', (['sample'], {}), '(sample)\n', (1721, 1729), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: <NAME> October 2018. MGP-Module adapted from Futoma et al. (https://arxiv.org/abs/1706.04152)
"""
import faulthandler
import os.path
import pickle
import sys
from time import time
import traceback
import numpy as np
from sacred import Experiment
from sklearn.metrics import roc_auc_score, average_precision_score
from tempfile import NamedTemporaryFile
import tensorflow as tf
from .memory_saving_gradients import gradients_memory
from ..preprocessing.main_preprocessing_mgp_tcn import load_data
from .tcn import CausalConv1D, TemporalBlock, TemporalConvNet
from .util import select_horizon, pad_rawdata_nomed, SE_kernel, OU_kernel, dot, \
CG, Lanczos, block_CG, block_Lanczos
from .util import mask_large_samples as ev_mask
# monkey patch tf.gradients to point to our custom version, with automatic checkpoint selection
# Replace tf.gradients globally with the memory-saving variant (checkpointed
# backprop) so every gradient computation below uses it.
tf.__dict__["gradients"] = gradients_memory
# Sacred experiment object; @ex.capture/@ex.config decorators below hang off it.
ex = Experiment('MGP-TCN')
def get_tcn_window(kernel_size, n_levels):
    """Return the receptive-field length of a TCN.

    With dilation doubling at each of *n_levels* levels and a fixed
    *kernel_size*, the window is 1 + sum_{i<n_levels} 2**i * (kernel_size - 1).
    """
    return 1 + sum(2 ** level * (kernel_size - 1) for level in range(n_levels))
def mask_large_samples(data, thres, obs_min, static=None, return_mask=False):
    """Remove outliers by cutoff in order to fit data into memory (one outlier patient has 11k observation values).

    Keeps only samples whose observation count (data[8]) lies in
    [obs_min, thres]. Every view in *data* (and *static*, if given) is
    filtered with the same boolean mask; the mask itself is appended to the
    return value when *return_mask* is True.
    """
    n_views = len(data)  # number of data views of compact format (values, times, indices, ..)
    obs_counts = data[8]
    too_few = obs_counts < obs_min  # patients with fewer than obs_min observation values
    print('-> {} patients have less than {} observation values'.format(np.sum(too_few), obs_min))
    keep = np.logical_and(obs_counts <= thres, ~too_few)
    print('---> Removing {} patients'.format(np.sum(~keep)))
    filtered = [data[view][keep] for view in range(n_views)]
    if static is None:
        return (filtered, keep) if return_mask else filtered
    filtered_static = static[keep]
    if return_mask:
        return filtered, filtered_static, keep
    return filtered, filtered_static
def count_parameters():
    """Print per-variable shapes/sizes and the total trainable parameter count."""
    total = 0
    for var in tf.trainable_variables():
        # Static shape of the variable; product of dims = number of scalars.
        shape = var.get_shape()
        n_params = 1
        for dim in shape:
            n_params *= dim.value
        print(var.name, [dim for dim in shape], n_params)
        total += n_params
    print('Number of trainable parameters = {}'.format(total))
def compute_l1():
    """Return the size-normalized L1 norm of the first TCN conv layer.

    Looks up the kernel and bias of 'temporal_conv_net/tblock_0/conv1' by name
    in the default graph, sums their absolute values, and divides by the total
    number of parameters so an L1 penalty coefficient has the same impact
    regardless of the filter shape.
    """
    #with tf.variable_scope("",reuse=True):
    #conv1_kernel_val = tf.get_variable('temporal_conv_net/tblock_0/conv1/kernel')
    kernel_name = 'temporal_conv_net/tblock_0/conv1/kernel:0'
    bias_name = 'temporal_conv_net/tblock_0/conv1/bias:0'
    gr = tf.get_default_graph()
    conv1_kernel_val = gr.get_tensor_by_name(kernel_name) #.eval(session=sess)
    conv1_bias_val = gr.get_tensor_by_name(bias_name)
    kernel_abs = tf.abs(conv1_kernel_val) # compute absolute of kernel tensor
    bias_abs = tf.abs(conv1_bias_val)
    kernel_abs_sum = tf.reduce_sum(kernel_abs, axis=0) # sum over kernel size, such that each entry represents absolute sum of all filter values for one channel and one filter
    bias_abs_sum = tf.reduce_sum(bias_abs)
    l1_per_filter = tf.reduce_sum(kernel_abs_sum, axis=0) #compute l1 over all channels per filter, since all elements are positive: sum over each channel
    l1_total = tf.reduce_sum(l1_per_filter) #sum over all filters of 1st convolution
    #add bias term to l1_total:
    l1_total += bias_abs_sum
    #normalize norm-value such that it is independent of the filter shape (s.t. a l1 penalty coefficient always has same impact)
    #compute total n of parameters of first conv1 layer:
    n_param=0
    for var in [conv1_kernel_val, conv1_bias_val]:
        shape = var.get_shape()
        n_var_param = 1.0
        for dim in shape:
            n_var_param *= dim.value
        n_param += n_var_param
    return l1_total/n_param
def compute_global_l2():
    """Return the L2 norm of all trainable kernels, normalized by weight count.

    Only variables whose name contains 'kernel' are included (biases are
    skipped); dividing by the total number of scalar weights makes the
    penalty coefficient independent of the architecture size.
    """
    variables = tf.trainable_variables()
    weights = [v for v in variables if 'kernel' in v.name ]
    # tf.nn.l2_loss computes sum(w**2)/2 per tensor; add_n sums them all.
    L2 = tf.add_n([ tf.nn.l2_loss(w) for w in weights ])
    #print([w.name for w in weights])
    '''weight_names = [w.name for w in weights]
    values = sess.run(weight_names)
    weight_dims = [w.shape for w in values]'''
    weight_dims = [w.get_shape() for w in weights]
    # Count the total number of scalar weights across all kernels.
    n_weights = 0
    for weight_dim in weight_dims:
        n_weights_per_kernel = 1.0
        for dim in weight_dim:
            n_weights_per_kernel *= dim.value #dim
        n_weights += n_weights_per_kernel
    print('N_weights:', n_weights)
    print('Weight Dims:', weight_dims)
    L2_per_weight = L2/n_weights
    #print(L2_per_weight.eval(session=sess))
    return L2_per_weight
#####
##### Convinience classes for managing parameters
#####
class DecompositionMethod():
    """Selects how GP covariance systems are solved ('chol' or 'cg'),
    together with the jitter added to the covariance diagonal."""

    valid_methods = ['chol', 'cg']

    def __init__(self, methodname, add_diag=1e-3):
        if methodname in self.valid_methods:
            self.methodname = methodname
            self.add_diag = add_diag
        else:
            raise ValueError('{} is not a valid methodname. Must be one of {}'.format(methodname, self.valid_methods))
class GPParameters():
    """Trainable hyperparameters of the multitask (multi-output) GP."""
    def __init__(self, input_dim, M, n_mc_smps):
        """Create TF variables for length scale, per-task noise, and task covariance.

        input_dim: number of observed time series (labs/vitals).
        M: number of GP output tasks.
        n_mc_smps: Monte Carlo samples drawn per encounter.
        """
        self.input_dim = input_dim
        self.M = M
        # Length scale is parameterized in log space to keep it positive.
        self.log_length = tf.Variable(tf.random_normal([1],mean=1,stddev=0.1),name="GP-log-length")
        self.length = tf.exp(self.log_length)
        #different noise level of each lab
        self.log_noises = tf.Variable(tf.random_normal([input_dim],mean=-2,stddev=0.1),name="GP-log-noises")
        self.noises = tf.exp(self.log_noises)
        #init cov between labs
        # Kf = Lf @ Lf^T with Lf lower-triangular keeps the task covariance PSD.
        self.L_f_init = tf.Variable(tf.eye(input_dim),name="GP-Lf")
        self.Lf = tf.matrix_band_part(self.L_f_init,-1,0)
        self.Kf = tf.matmul(self.Lf,tf.transpose(self.Lf))
        self.n_mc_smps = n_mc_smps
# Model-specific prepro function to reset times to 0 to 48 (counted from first provided measurement):
@ex.capture
def reset_times(train_data,validation_data,test_data):
    """Shift each encounter's times to start at 0 and rebuild the TCN grids.

    For every dataset, observation times (view 1) are offset by their minimum,
    and views 5/6 are recomputed as the number of integer grid points and the
    grid itself (0 .. floor(end_time)). Returns shallow copies of the three
    datasets; the inner arrays are copied only for the time view.
    """
    train, val, test = train_data.copy(), validation_data.copy(), test_data.copy()
    for dataset in [train, val, test]:
        times = dataset[1].copy()
        num_tcn_grid_times = []
        tcn_grid_times = []
        for i in range(len(times)):
            # Re-anchor this encounter's clock at its first measurement.
            times[i] = times[i]-min(times[i])
            end_time = times[i][-1]
            num_tcn_grid_time = int(np.floor(end_time)+1)
            num_tcn_grid_times.append(num_tcn_grid_time)
            tcn_grid_times.append(np.arange(num_tcn_grid_time))
        dataset[1] = times
        dataset[5] = np.array(num_tcn_grid_times)
        dataset[6] = np.array(tcn_grid_times)
    return train, val, test
#####
##### Tensorflow functions
#####
@ex.capture
def draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti,method, gp_params):
    """
    given GP hyperparams and data values at observation times, draw from
    conditional GP
    inputs:
        length,noises,Lf,Kf: GP params (taken from gp_params)
        Yi: observation values
        Ti: observation times
        Xi: grid points (new times for tcn)
        ind_kfi,ind_kti: indices into Y (task index / time index per value)
        method: DecompositionMethod choosing 'chol' vs 'cg' solves
    returns:
        draws from the GP at the evenly spaced grid times Xi, given hyperparams and data
    """
    n_mc_smps, length, noises, Lf, Kf = gp_params.n_mc_smps, gp_params.length, gp_params.noises, gp_params.Lf, gp_params.Kf
    M = gp_params.M
    ny = tf.shape(Yi)[0]
    # Temporal covariance between all observed time points (OU kernel).
    K_tt = OU_kernel(length,Ti,Ti)
    D = tf.diag(noises)
    grid_f = tf.meshgrid(ind_kfi,ind_kfi) #same as np.meshgrid
    # Expand the task covariance to one entry per observed value pair.
    Kf_big = tf.gather_nd(Kf,tf.stack((grid_f[0],grid_f[1]),-1))
    grid_t = tf.meshgrid(ind_kti,ind_kti)
    Kt_big = tf.gather_nd(K_tt,tf.stack((grid_t[0],grid_t[1]),-1))
    # Separable covariance: elementwise product of task and time covariances.
    Kf_Ktt = tf.multiply(Kf_big,Kt_big)
    DI_big = tf.gather_nd(D,tf.stack((grid_f[0],grid_f[1]),-1))
    DI = tf.diag(tf.diag_part(DI_big)) #D kron I
    #data covariance.
    #Either need to take Cholesky of this or use CG / block CG for matrix-vector products
    Ky = Kf_Ktt + DI + method.add_diag*tf.eye(ny)
    ### build out cross-covariances and covariance at grid
    nx = tf.shape(Xi)[0]
    K_xx = OU_kernel(length,Xi,Xi)
    K_xt = OU_kernel(length,Xi,Ti)
    ind = tf.concat([tf.tile([i],[nx]) for i in range(M)],0)
    grid = tf.meshgrid(ind,ind)
    Kf_big = tf.gather_nd(Kf,tf.stack((grid[0],grid[1]),-1))
    ind2 = tf.tile(tf.range(nx),[M])
    grid2 = tf.meshgrid(ind2,ind2)
    Kxx_big = tf.gather_nd(K_xx,tf.stack((grid2[0],grid2[1]),-1))
    K_ff = tf.multiply(Kf_big,Kxx_big) #cov at grid points
    # Cross-covariance between grid points and observed values.
    full_f = tf.concat([tf.tile([i],[nx]) for i in range(M)],0)
    grid_1 = tf.meshgrid(full_f,ind_kfi,indexing='ij')
    Kf_big = tf.gather_nd(Kf,tf.stack((grid_1[0],grid_1[1]),-1))
    full_x = tf.tile(tf.range(nx),[M])
    grid_2 = tf.meshgrid(full_x,ind_kti,indexing='ij')
    Kxt_big = tf.gather_nd(K_xt,tf.stack((grid_2[0],grid_2[1]),-1))
    K_fy = tf.multiply(Kf_big,Kxt_big)
    #now get draws!
    y_ = tf.reshape(Yi,[-1,1])
    # Standard-normal noise, one column per Monte Carlo sample.
    xi = tf.random_normal((nx*M, n_mc_smps))
    #print('xi shape:')
    #print(xi.shape)
    if method.methodname == 'chol':
        # Exact conditional mean/covariance via Cholesky factorization.
        Ly = tf.cholesky(Ky)
        Mu = tf.matmul(K_fy,tf.cholesky_solve(Ly,y_))
        Sigma = K_ff - tf.matmul(K_fy,tf.cholesky_solve(Ly,tf.transpose(K_fy))) + method.add_diag*tf.eye(tf.shape(K_ff)[0])
        draw = Mu + tf.matmul(tf.cholesky(Sigma),xi)
        draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])
    elif method.methodname == 'cg':
        Mu = tf.matmul(K_fy,CG(Ky,y_)) #May be faster with CG for large problems
        #Never need to explicitly compute Sigma! Just need matrix products with Sigma in Lanczos algorithm
        def Sigma_mul(vec):
            # vec must be a 2d tensor, shape (?,?)
            return tf.matmul(K_ff,vec) - tf.matmul(K_fy,block_CG(Ky,tf.matmul(tf.transpose(K_fy),vec)))
        def large_draw():
            return Mu + block_Lanczos(Sigma_mul,xi,n_mc_smps) #no need to explicitly reshape Mu
        draw = large_draw()
        draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])
    return draw_reshape
@ex.capture
def get_GP_samples(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,
    num_tcn_grid_times, cov_grid, input_dim,method, gp_params, lab_vitals_only, pad_before): ##,med_cov_grid
    """
    returns samples from GP at evenly-spaced gridpoints

    Iterates over the batch with a tf.while_loop (variable-length encounters),
    drawing n_mc_smps GP samples per encounter via draw_GP, zero-padding each
    to the batch's longest grid, and optionally concatenating static
    covariates along the feature axis.
    """
    n_mc_smps, M = gp_params.n_mc_smps, gp_params.M
    grid_max = tf.shape(X)[1]
    # Accumulator for the stacked samples; grows along axis 0 in the loop.
    Z = tf.zeros([0,grid_max,input_dim])
    N = tf.shape(T)[0] #number of observations
    #setup tf while loop (have to use this bc loop size is variable)
    def cond(i,Z):
        return i<N
    def body(i,Z):
        # Slice out this encounter's actual (unpadded) values/times/indices.
        Yi = tf.reshape(tf.slice(Y,[i,0],[1,num_obs_values[i]]),[-1]) #MM: tf.reshape(x, [-1]) flattens tensor x (e.g. [2,3,1] to [6]), slice cuts out all Y data of one patient
        Ti = tf.reshape(tf.slice(T,[i,0],[1,num_obs_times[i]]),[-1])
        ind_kfi = tf.reshape(tf.slice(ind_kf,[i,0],[1,num_obs_values[i]]),[-1])
        ind_kti = tf.reshape(tf.slice(ind_kt,[i,0],[1,num_obs_values[i]]),[-1])
        Xi = tf.reshape(tf.slice(X,[i,0],[1,num_tcn_grid_times[i]]),[-1])
        X_len = num_tcn_grid_times[i]
        GP_draws = draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti,method=method, gp_params=gp_params)
        pad_len = grid_max-X_len #pad by this much
        #padding direction:
        if pad_before:
            print('Padding GP_draws before observed data..')
            padded_GP_draws = tf.concat([tf.zeros((n_mc_smps,pad_len,M)), GP_draws],1)
        else:
            padded_GP_draws = tf.concat([GP_draws,tf.zeros((n_mc_smps,pad_len,M))],1)
        if lab_vitals_only:
            Z = tf.concat([Z,padded_GP_draws],0) #without covs
        else: #with covs
            # Tile the static covariates across MC samples and append as features.
            medcovs = tf.slice(cov_grid,[i,0,0],[1,-1,-1])
            tiled_medcovs = tf.tile(medcovs,[n_mc_smps,1,1])
            padded_GPdraws_medcovs = tf.concat([padded_GP_draws,tiled_medcovs],2)
            Z = tf.concat([Z,padded_GPdraws_medcovs],0) #with covs
        return i+1,Z
    i = tf.constant(0)
    #with tf.control_dependencies([tf.Print(tf.shape(ind_kf), [tf.shape(ind_kf), tf.shape(ind_kt), num_obs_values], 'ind_kf & ind_kt & num_obs_values')]):
    # Z changes shape across iterations, hence the relaxed shape invariant.
    i,Z = tf.while_loop(cond,body,loop_vars=[i,Z],
                shape_invariants=[i.get_shape(),tf.TensorShape([None,None,None])])
    return Z
@ex.capture
def get_preds(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,
    num_tcn_grid_times, cov_grid, input_dim,method, gp_params, tcn, is_training, n_classes, lab_vitals_only, pad_before, losstype): #med_cov_grid
    """
    helper function. takes in (padded) raw datas, samples MGP for each observation,
    then feeds it all through the TCN to get predictions.
    inputs:
        Y: array of observation values (labs/vitals). batchsize x batch_maxlen_y
        T: array of observation times (times during encounter). batchsize x batch_maxlen_t
        X: array of grid points. batchsize x batch_maxgridlen
        ind_kf: indices into each row of Y, pointing towards which lab/vital. same size as Y
        ind_kt: indices into each row of Y, pointing towards which time. same size as Y
        num_obs_times: number of times observed for each encounter; how long each row of T really is
        num_obs_values: number of lab values observed per encounter; how long each row of Y really is
        num_tcn_grid_times: length of even spaced TCN grid per encounter
    returns:
        predictions (unnormalized log probabilities) for each MC sample of each obs
    """
    Z = get_GP_samples(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,
        num_tcn_grid_times, cov_grid, input_dim, method=method, gp_params=gp_params, lab_vitals_only=lab_vitals_only, pad_before=pad_before) #batchsize*num_MC x batch_maxseqlen x num_inputs ##,med_cov_grid
    Z.set_shape([None,None,input_dim]) #somehow lost shape info, but need this
    N = tf.shape(T)[0] #number of observations
    # Classify from the TCN output at the final time step only.
    tcn_logits = tf.layers.dense(
        tcn(Z, training=is_training)[:, -1, :],
        n_classes, activation=None,
        kernel_initializer=tf.orthogonal_initializer(),
        name='last_linear', reuse=(losstype == 'average')
    ) # reuse should be true if losstype is average
    return tcn_logits
@ex.capture
def get_losses(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,
    num_tcn_grid_times, cov_grid, input_dim,method, gp_params, tcn, is_training, n_classes, lab_vitals_only, pad_before,
    labels, pos_weight): #med_cov_grid
    """
    helper function. takes in (padded) raw datas, samples MGP for each observation,
    then feeds it all through the TCN and computes a weighted cross-entropy
    loss averaged over the last T_max=7 time steps (encouraging early prediction).
    inputs:
        Y: array of observation values (labs/vitals). batchsize x batch_maxlen_y
        T: array of observation times (times during encounter). batchsize x batch_maxlen_t
        ind_kf: indices into each row of Y, pointing towards which lab/vital. same size as Y
        ind_kt: indices into each row of Y, pointing towards which time. same size as Y
        num_obs_times: number of times observed for each encounter; how long each row of T really is
        num_obs_values: number of lab values observed per encounter; how long each row of Y really is
        num_tcn_grid_times: length of even spaced TCN grid per encounter
    returns:
        per-sample losses averaged over the last T_max time steps
    """
    Z = get_GP_samples(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,
        num_tcn_grid_times, cov_grid, input_dim, method=method, gp_params=gp_params, lab_vitals_only=lab_vitals_only, pad_before=pad_before) #batchsize*num_MC x batch_maxseqlen x num_inputs ##,med_cov_grid
    Z.set_shape([None,None,input_dim]) #somehow lost shape info, but need this
    N = tf.shape(Z)[0] #number of observations
    # We only want to consider up to 7 timepoints before end
    T_max = 7
    # Only during training we want to average over the last few predictions in order to give
    # the model the incentive to predict early
    tcn_out = tcn(Z, training=is_training)[:, -T_max:, :]
    tcn_logits = tf.layers.dense(tcn_out,
        n_classes, activation=None,
        kernel_initializer=tf.orthogonal_initializer(),
        name='last_linear', reuse=False
    )
    # Only get a few of the last obs
    #used_grid = tf.reduce_min(tf.stack([num_tcn_grid_times, tf.fill(tf.shape(num_tcn_grid_times), T_max)]), axis=0)
    #tiled = tf.tile(tf.expand_dims(used_grid, axis=-1), [1, gp_params.n_mc_smps])
    #expanded_used_grid = tf.reshape(tiled, [-1])
    # Repeat each label across the T_max time steps so the loss is defined per step.
    tiled_labels = tf.tile(tf.expand_dims(labels, axis=1), tf.stack([1, T_max, 1]))
    all_losses = tf.nn.weighted_cross_entropy_with_logits(logits=tcn_logits,targets=tiled_labels, pos_weight=pos_weight)
    average_losses = tf.reduce_mean(all_losses, axis=-1)
    return average_losses
@ex.capture
def get_probs_and_accuracy(preds, O, n_mc_smps):
    """
    helper function. we have a prediction for each MC sample of each observation
    in this batch. need to distill the multiple preds from each MC into a single
    pred for this observation. also get accuracy. use true probs to get ROC, PR curves in sklearn

    preds: unnormalized logits, MC samples stacked along axis 0.
    O: integer class labels per observation (0/1).
    Returns (probs, accuracy): positive-class probability per observation and
    the accuracy at a fixed 0.5 threshold.
    """
    all_probs = tf.exp(preds[:,1] - tf.reduce_logsumexp(preds, axis = 1)) #normalize; and drop a dim so only prob of positive case
    N = tf.cast(tf.shape(preds)[0]/n_mc_smps,tf.int32) #actual number of observations in preds, collapsing MC samples
    #predicted probability per observation; collapse the MC samples
    probs = tf.zeros([0]) #store all samples in a list, then concat into tensor at end
    #setup tf while loop (have to use this bc loop size is variable)
    def cond(i,probs):
        return i < N
    def body(i,probs):
        # Average the n_mc_smps consecutive probabilities belonging to obs i.
        probs = tf.concat([probs,[tf.reduce_mean(tf.slice(all_probs,[i*n_mc_smps],[n_mc_smps]))]],0)
        return i+1,probs
    i = tf.constant(0)
    i,probs = tf.while_loop(cond,body,loop_vars=[i,probs],shape_invariants=[i.get_shape(),tf.TensorShape([None])])
    #compare to truth; just use cutoff of 0.5 for right now to get accuracy
    correct_pred = tf.equal(tf.cast(tf.greater(probs,0.5),tf.int32), O)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    return probs,accuracy
@ex.config
def mpg_tcn_config():
    # Sacred config scope: every local assignment below becomes a tunable
    # config entry of the experiment, so names and values must stay as-is.
    dataset = {
        'na_thres': 500, # 30,100: number of non-NaNs a variable must show in prepro to avoid being dropped (removes too rarely sampled variables)
        'datapath': 'output/',
        'overwrite': 0, # 0: whether the prepro script should rerun from scratch; otherwise a previous dump is loaded
        'horizon': 0,
        'data_sources': ['labs','vitals','covs'], # future: meds. labs and vitals are required currently! TODO: make prepro script more modular s.t. labs and vitals can be left away
        'lab_vitals_only': True, # flag for only using labs/vitals and no covs in get_preds, get_GP_samples..
        'min_length': 7, # None: minimal number of gridpoints below which a sample is dropped
        'max_length': 200, # None: maximal number of gridpoints above which a sample is dropped (5 outliers lead to 3-4x memory use)
        'min_pad_length': 8, # None: minimal padded time-series length; batches are padded to max(max_batch_len, min_pad_length) so no batch is too short for the TCN
        'num_obs_thres': 10000, # None: min/max number of values (patients with 0 values appear due to control matching; drop controls with no values). Currently 10k drops 1 patient; this saves memory.
        'split':0 # data split index, 0-4
    }
    # Padding method:
    pad_before=False # zero-pad batches before the observed data instead of after, if False
    # GP method:
    decomposition_method='chol' # alternative: 'cg'
    add_diag = 1e-1 # diagonal jitter; changed from 1e-3 for more stability
    losstype='weighted' # alternative: 'regular'
    batch_size = 100 # NOTE: may want to play around with this
    learning_rate = 0.001
    decay_learning_rate = False
    training_iters = 30 # number of epochs
    levels=4 # number of TCN levels
    kernel_size=4 # TCN convolution kernel size
    n_hidden = 40 # hidden layer num of features; assumed same across levels
    #n_layers = 1 #3 number of layers of stacked LSTMs
    n_mc_smps = 10 # Monte-Carlo samples per observation #10
    dropout = 0.1 # fraction of units dropped (NOT keep-probability!)
    reduction_dim = None # reduction dim for 1x1 conv in temporal blocks. Set to None to disable
    drop_first_res = False # when applying l1/sparsity, drop first residual connection in TemporalConvNet s.t. all responsibility lies with the 1st convolution filters
    l1_filter_reg = None # regularize first conv layer for sparse and interpretable filters..
    L1_penalty = 0 # coefficient for l1 norm of first filter in loss
    L2_penalty = None # uses per-weight norm! hence lambda so large.. multiplied with start per-weight-norm value arrives at around 10. train loss around 100-2000
    # Configuration: reset time to 0-48 instead of using raw hours from in-time (e.g. 210.5 - 258.5)
    time_reset = 0 # set to 1 to enable
@ex.named_config
def decay_lr():
    # Named config: enable exponential learning-rate decay with a higher start LR.
    learning_rate = 0.01
    decay_learning_rate = True
@ex.capture(prefix='dataset')
def get_dataset(na_thres, datapath, overwrite, horizon, data_sources, min_length, max_length, split, num_obs_thres):
    """Load (or preprocess and dump) the dataset for the given split.

    Returns the full dataset list: [variables, train, val, test,
    train_static, val_static, test_static]. When ``horizon`` != 0 the
    splits are additionally masked and truncated to the requested horizon.
    """
    print('USING SPLIT {}, HORIZON {}'.format(split,horizon))
    # The dump filename always encodes horizon_0; horizon selection happens below.
    datapath += 'mgp-tcn-datadump_'+'_'.join([str(el) for el in data_sources])+'_na_thres_{}_min_length_{}_max_length_{}_horizon_0_split_{}.pkl'.format(na_thres, min_length, max_length, split)
    # datapath += 'mgp-tcn-datadump_'+'_'.join([str(el) for el in data_sources])+'_na_thres_{}_min_length_{}_max_length_{}_horizon_0_split_{}_new_extend.pkl'.format(na_thres, min_length, max_length, split)
    if (overwrite or not os.path.isfile(datapath) ): # check if data was not prepared and loaded before
        if overwrite:
            print('Overwriting mode: running prepro script, dumping and loading data..')
        else:
            print('Data dump not found. Running prepro script, dumping and loading data..')
        # Preprocess and load data..
        full_dataset = load_data(test_size=0.1, horizon=0, na_thres=na_thres, variable_start_index = 5, data_sources=data_sources, min_length=min_length, max_length=max_length, split=split)
        # Then dump it, for faster iterating
        pickle.dump( full_dataset, open(datapath, "wb"))
    else:
        print('Loading existing preprocessed data dump')
        full_dataset = pickle.load( open( datapath, "rb" ))
    if horizon !=0:
        train_data,validation_data,test_data = full_dataset[1:4]
        train_static_data, validation_static_data, test_static_data = full_dataset[4:7]
        # First masking (necessary for select_horizon function to work)
        obs_min = 10 # hard-coded for now, but we never used a different value due to constant mc_smps
        print('First masking before horizon selection')
        stats_train, train_data, train_static_data = ev_mask(train_data, num_obs_thres, obs_min, static=train_static_data)
        stats_validation, validation_data, validation_static_data = ev_mask(validation_data, num_obs_thres, obs_min, static=validation_static_data)
        stats_test, test_data, test_static_data = ev_mask(test_data, num_obs_thres, obs_min, static=test_static_data)
        train_data = select_horizon(train_data, horizon)
        validation_data = select_horizon(validation_data, horizon)
        test_data = select_horizon(test_data, horizon)
        # Rebuild the dataset list with the horizon-truncated splits.
        full_dataset = full_dataset[:1] + \
            [train_data, validation_data, test_data, train_static_data, validation_static_data, test_static_data]
    return full_dataset
def mad(arr):
    """Median Absolute Deviation: a "robust" analogue of the standard deviation.

    Indicates the variability of the sample.
    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    center = np.median(arr)
    deviations = np.abs(arr - center)
    return np.median(deviations)
def print_var_statistics(name, values):
    """Print mean +/- std and median +/- MAD summary statistics for `values`."""
    mean = np.mean(values)
    std = np.std(values)
    med = np.median(values)
    med_dev = mad(values)
    print(f'{name}: mean: {mean} +/- {std}, median: {med} +/- {med_dev}')
@ex.command(prefix='dataset')
def dataset_statistics(na_thres, datapath, horizon, data_sources, min_length, max_length, split):
    """Load a preprocessed data dump and print per-split summary statistics."""
    print('USING SPLIT {}'.format(split))
    datapath += 'mgp-tcn-datadump_'+'_'.join([str(el) for el in data_sources])+'_na_thres_{}_min_length_{}_max_length_{}_horizon_{}_split_{}.pkl'.format(na_thres, min_length, max_length, horizon, split)
    print('Loading existing preprocessed data dump')
    #train_data, validation_data, test_data, variables = pickle.load( open( datapath, "rb" ))
    full_dataset = pickle.load( open( datapath, "rb" ))
    train_data, validation_data, test_data = full_dataset[1:4]
    for name, data in zip(['train', 'val', 'test'], [train_data, validation_data, test_data]):
        # Unpack the per-split arrays; indices follow the prepro dump layout.
        values = data[0]
        times = data[1]
        ind_lvs = data[2]
        ind_times = data[3]
        labels = data[4]
        num_tcn_grid_times = data[5]
        tcn_grid_times = data[6]
        num_obs_times = data[7]
        num_obs_values = data[8]
        print(f'{name}')
        # Only the observation/grid counts are summarized here.
        print_var_statistics('num_obs_times', num_obs_times)
        print_var_statistics('num_obs_values', num_obs_values)
        print_var_statistics('num_tcn_grid_times', num_tcn_grid_times)
@ex.main
def fit_mgp_tcn(decomposition_method, add_diag, losstype, n_hidden, levels, kernel_size, n_mc_smps, dropout, reduction_dim, batch_size, learning_rate, decay_learning_rate,
    training_iters, time_reset, l1_filter_reg, drop_first_res, L1_penalty, L2_penalty, pad_before, _rnd, _seed, _run, dataset):
    """Train the MGP-TCN model and return the best validation AUPRC.

    Builds the TF1 graph (MGP adapter feeding a TemporalConvNet classifier),
    trains with minibatch Adam, and periodically evaluates AUROC / AUPRC on
    the validation set, logging everything to ``dataset['LOG_FILE']`` and to
    the sacred run.

    Returns:
        dict with key 'Best Validation AUPRC'.
    """
    log_file = dataset['LOG_FILE']
    # Where model checkpoints would go (checkpointing is currently disabled below).
    if len(_run.observers) > 0:
        checkpoint_path = os.path.join(_run.observers[0].dir, 'model_checkpoints')
    else:
        checkpoint_path = 'model_checkpoints'
    rs = _rnd  # seeded numpy RandomState injected by sacred
    tf.logging.set_verbosity(tf.logging.ERROR)
    # Load dataset
    full_dataset = get_dataset()
    data_sources = dataset['data_sources']  # names of data sources (labs, vitals, ...)
    lab_vitals_only = dataset['lab_vitals_only']
    num_obs_thres = dataset['num_obs_thres']
    min_pad_length = dataset['min_pad_length']  # minimal time-series length a batch should be padded to (for TCNs)
    # Remove samples with fewer than max(10, n_mc_smps) observation values:
    # the lanczos implementation fails when mc_smps > num_obs.
    obs_min = np.max([10, n_mc_smps])
    method = DecompositionMethod(decomposition_method, add_diag)
    print('Data is loaded. Now assign available data sources to variables')
    if lab_vitals_only:
        print('Only lab and vitals will be used..')
    # Each data_source contributes 3 splits to full_dataset; the variable list
    # always comes first.
    index = 0
    variables = full_dataset[index]
    index += 1  # advance to the next element in full_dataset
    # BUGFIX: original condition was `'labs' and 'vitals' in data_sources`,
    # which parses as `'labs' and ('vitals' in data_sources)` and thus only
    # tested for 'vitals'. Check both sources explicitly.
    if 'labs' in data_sources and 'vitals' in data_sources:
        train_data,validation_data,test_data = full_dataset[index:index+3]
        index += 3
    else:
        raise ValueError('Labs or Vitals not selected in config, yet they are required') #TODO: make this case possible
    if 'covs' in data_sources:
        train_static_data, validation_static_data, test_static_data = full_dataset[index:index+3]
        if num_obs_thres is not None:
            train_data, train_static_data = mask_large_samples(train_data, num_obs_thres, obs_min, static=train_static_data)
            validation_data, validation_static_data = mask_large_samples(validation_data, num_obs_thres, obs_min, static=validation_static_data)
            test_data, test_static_data = mask_large_samples(test_data, num_obs_thres, obs_min, static=test_static_data)
    # BUGFIX: original was `elif 'labs' or 'vitals' in data_soures:` -- always
    # true via short-circuit on the truthy string literal, which also masked
    # the `data_soures` NameError typo.
    elif 'labs' in data_sources or 'vitals' in data_sources:
        if num_obs_thres is not None:
            train_data = mask_large_samples(train_data, num_obs_thres, obs_min)
            validation_data = mask_large_samples(validation_data, num_obs_thres, obs_min)
            test_data = mask_large_samples(test_data, num_obs_thres, obs_min)
    # Check if we reset times:
    if time_reset:
        # For all splits, reset times to 0 - 48 hours, tcn grid points accordingly!
        train_data,validation_data,test_data = reset_times(train_data,validation_data,test_data)
        print('Time was reset to [0, 48] hours')
    M = len(variables)
    Ntr = len(train_data[0])
    Nva = len(validation_data[0])
    # NOTE(review): the lines below assume 'covs' is in data_sources; without
    # covariates, train_static_data is undefined -- confirm callers always include it.
    n_covs = train_static_data.shape[1]
    print('n_covs = {}'.format(n_covs))
    print('data sources: {}'.format(data_sources))
    print('covs in data_sources: {}'.format('covs' in data_sources))
    # Assign data splits (indices follow the prepro dump layout):
    values_tr = train_data[0]; values_va = validation_data[0]
    times_tr = train_data[1]; times_va = validation_data[1]
    ind_lvs_tr = train_data[2]; ind_lvs_va = validation_data[2]
    ind_times_tr = train_data[3]; ind_times_va = validation_data[3]
    labels_tr = train_data[4]; labels_va = validation_data[4]
    num_tcn_grid_times_tr = train_data[5]; num_tcn_grid_times_va = validation_data[5]
    tcn_grid_times_tr = train_data[6]; tcn_grid_times_va = validation_data[6]
    num_obs_times_tr = train_data[7]; num_obs_times_va = validation_data[7]
    num_obs_values_tr = train_data[8]; num_obs_values_va = validation_data[8]
    if 'covs' in data_sources:
        covs_tr = train_static_data
        covs_va = validation_static_data #; covs_te = test_static_data
    # Get class imbalance (for weighted loss):
    case_prev = labels_tr.sum()/float(len(labels_tr))  # prevalence of cases in train dataset
    class_imb = 1/case_prev  # class imbalance, used as class weight if losstype='weighted'
    print("data fully setup!")
    sys.stdout.flush()
    #####
    ##### Setup model and graph
    #####
    # Learning parameters
    decay_step = int(Ntr/batch_size)  # after how many batches the learning rate is adjusted
    # Evaluate on the validation set roughly 4 times per epoch.
    test_freq = int(Ntr/batch_size / 4)
    # Network parameters
    n_classes = 2  # binary outcome
    if lab_vitals_only:
        input_dim = M
    elif 'covs' in data_sources:
        input_dim = M + n_covs  # dimensionality of input sequence (M + n_covs)
    else:
        input_dim = M
    # Create graph. Resetting the default graph here would break seed control,
    # so only the random seed is fixed.
    tf.set_random_seed(_seed)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # Define decaying learning rate (global_step must be passed to minimize()):
    global_step = tf.Variable(0, trainable=False)
    if decay_learning_rate:
        learning_rate = tf.train.exponential_decay(learning_rate, global_step,
            decay_step, 0.96, staircase=True)
    ##### tf Graph - inputs
    # Observed values, times, inducing times; padded to longest in the batch.
    Y = tf.placeholder("float", [None,None])  # batchsize x batch_maxdata_length
    T = tf.placeholder("float", [None,None])  # batchsize x batch_maxdata_length
    ind_kf = tf.placeholder(tf.int32, [None,None])  # index of tasks in Y vector
    ind_kt = tf.placeholder(tf.int32, [None,None])  # index of inputs in Y vector
    X = tf.placeholder("float", [None,None])  # grid points. batchsize x batch_maxgridlen
    cov_grid = tf.placeholder("float", [None,None,n_covs])  # combined with GP samples to feed into TCN
    O = tf.placeholder(tf.int32, [None])  # labels. NOT one-hot; converted each iteration
    num_obs_times = tf.placeholder(tf.int32, [None])  # number of observation times per encounter
    num_obs_values = tf.placeholder(tf.int32, [None])  # number of observation values per encounter
    num_tcn_grid_times = tf.placeholder(tf.int32, [None])  # length of each grid fed into the TCN
    N = tf.shape(Y)[0]
    # O as one-hot, duplicated per MC sample, for the loss function.
    O_dupe_onehot = tf.one_hot(tf.reshape(tf.tile(tf.expand_dims(O,1),[1,n_mc_smps]),[N*n_mc_smps]),n_classes)
    gp_params = GPParameters(input_dim, M, n_mc_smps)  # input_dim defined once above
    # Define TCN network; widen padding if the receptive field needs it:
    calculated_length = get_tcn_window(kernel_size, levels)
    if calculated_length > min_pad_length:
        print('Timeseries min_pad_length: {} are too short for Specified TCN Parameters requiring {}'.format(min_pad_length, calculated_length))
        min_pad_length = calculated_length
        print('>>>>>> Setting min_pad_length to: {}'.format(min_pad_length))
    # Initialize architecture:
    tcn = TemporalConvNet([n_hidden] * levels, kernel_size, dropout, reduction_dim=reduction_dim, drop_first_res=drop_first_res)
    is_training = tf.placeholder("bool")
    ##### Get predictions and feed into optimization
    if losstype=='average':
        losses = get_losses(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,num_tcn_grid_times, cov_grid, input_dim,
            method=method, gp_params=gp_params, tcn=tcn, is_training=is_training, n_classes=n_classes, lab_vitals_only=lab_vitals_only, pad_before=pad_before,
            labels=O_dupe_onehot, pos_weight=class_imb)
        loss_fit = tf.reduce_sum(losses)
    preds = get_preds(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,num_tcn_grid_times, cov_grid, input_dim,
        method=method, gp_params=gp_params, tcn=tcn, is_training=is_training, n_classes=n_classes, lab_vitals_only=lab_vitals_only, pad_before=pad_before, losstype=losstype)
    probs,accuracy = get_probs_and_accuracy(preds,O)  # n_mc_smps injected by sacred
    # Define optimization problem
    if losstype=='weighted':
        loss_fit = tf.reduce_sum(tf.nn.weighted_cross_entropy_with_logits(logits=preds,targets=O_dupe_onehot, pos_weight=class_imb))
    if losstype=='sq_hinge':
        h = tf.keras.losses.SquaredHinge()
        loss_fit = h(O_dupe_onehot,preds)
    # Optional regularizers on top of the fit loss:
    if L2_penalty is not None:
        loss_reg = compute_global_l2()  # normalized per weight! hence, use large lambda
        loss = loss_fit + loss_reg*L2_penalty
    if l1_filter_reg is not None:
        loss_reg = compute_l1()
        if L2_penalty is not None:
            loss = loss + L1_penalty*loss_reg
        else:
            loss = loss_fit + L1_penalty*loss_reg
    if (L2_penalty is None) and (l1_filter_reg is None):
        loss = loss_fit
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)  # global_step added so LR decay advances
    ##### Initialize globals and get ready to start!
    sess.run(tf.global_variables_initializer())
    print("Graph setup!")
    # Add run options for memory issues:
    run_options = tf.RunOptions(report_tensor_allocations_upon_oom = True)
    # Setup minibatch indices for training:
    starts = np.arange(0,Ntr,batch_size)
    ends = np.arange(batch_size,Ntr+1,batch_size)
    if ends[-1]<Ntr:
        ends = np.append(ends,Ntr)
    num_batches = len(ends)
    # Setup minibatch indices for validation (memory saving):
    va_starts = np.arange(0,Nva,batch_size)
    va_ends = np.arange(batch_size,Nva+1,batch_size)
    if va_ends[-1]<Nva:
        va_ends = np.append(va_ends,Nva)
    # Pad the validation data once up front:
    T_pad_va, Y_pad_va, ind_kf_pad_va, ind_kt_pad_va, X_pad_va, cov_pad_va = pad_rawdata_nomed(
        times_va, values_va, ind_lvs_va, ind_times_va,
        tcn_grid_times_va, covs_va, num_tcn_grid_times_va, min_pad_length)
    ##### Main training loop
    saver = tf.train.Saver(max_to_keep = None)  # checkpointing currently disabled; use saver.save(sess, checkpoint_path, ...) to re-enable
    total_batches = 0
    best_val = 0
    best_auroc = 0
    for i in range(training_iters):
        # Print max memory usage up to now:
        print('Max Memory Usage up to now')
        print(sess.run(tf.contrib.memory_stats.MaxBytesInUse()))
        # Train
        epoch_start = time()
        print("Starting epoch "+"{:d}".format(i))
        perm = rs.permutation(Ntr)
        batch = 0
        for s,e in zip(starts,ends):
            if decay_learning_rate:
                print('Currrent Learning Rate:')
                print(sess.run(learning_rate))
            batch_start = time()
            inds = perm[s:e]
            T_pad,Y_pad,ind_kf_pad,ind_kt_pad,X_pad, cov_pad = pad_rawdata_nomed(
                times_tr[inds],values_tr[inds],ind_lvs_tr[inds],ind_times_tr[inds],
                tcn_grid_times_tr[inds], covs_tr[inds,:], num_tcn_grid_times_tr[inds], min_pad_length)
            feed_dict={Y:Y_pad,T:T_pad,ind_kf:ind_kf_pad,ind_kt:ind_kt_pad,X:X_pad, cov_grid:cov_pad,
                num_obs_times:num_obs_times_tr[inds],
                num_obs_values:num_obs_values_tr[inds],
                num_tcn_grid_times:num_tcn_grid_times_tr[inds],O:labels_tr[inds], is_training: True}
            try:
                loss_,_ = sess.run([loss,train_op],feed_dict, options=run_options)
            except Exception as e:
                # BUGFIX: the formatted traceback was computed but never shown.
                print(traceback.format_exc())
                print('Error occured in tensorflow during training:', e)
                # In addition, dump a more detailed traceback as a run artifact:
                with NamedTemporaryFile(suffix='.csv') as f:
                    faulthandler.dump_traceback(f)
                    _run.add_artifact(f.name, 'faulthandler_dump.csv')
                break
            print("Batch "+"{:d}".format(batch)+"/"+"{:d}".format(num_batches)+\
                ", took: "+"{:.3f}".format(time()-batch_start)+", loss: "+"{:.5f}".format(loss_))
            with open(log_file,'a+') as write_file:
                write_file.write("train_loss,{},{},{:d},{:.5f}\n".format(dataset['split'], dataset['horizon'], i, loss_))
            sys.stdout.flush()
            batch += 1; total_batches += 1
            if total_batches % test_freq == 0:  # check val set every so often for early stopping
                print('--> Entering validation step...')
                # TODO: may also want to check validation performance additional X
                # hours back from the event time, so we know the model also
                # generalizes further back in time.
                val_t = time()
                # Batch-wise validation phase:
                va_probs_tot = np.array([])
                va_perm = rs.permutation(Nva)
                va_labels_tot = labels_va[va_perm]
                for v_s,v_e in zip(va_starts,va_ends):
                    va_inds = va_perm[v_s:v_e]
                    va_feed_dict={Y:Y_pad_va[va_inds,:], T:T_pad_va[va_inds,:], ind_kf:ind_kf_pad_va[va_inds,:],
                        ind_kt:ind_kt_pad_va[va_inds,:], X:X_pad_va[va_inds,:],
                        cov_grid:cov_pad_va[va_inds,:,:], num_obs_times:num_obs_times_va[va_inds],
                        num_obs_values:num_obs_values_va[va_inds], num_tcn_grid_times:num_tcn_grid_times_va[va_inds],
                        O:labels_va[va_inds], is_training: False}
                    try:
                        va_probs,va_acc,va_loss = sess.run([probs,accuracy,loss],va_feed_dict, options=run_options)
                    except Exception as e:
                        # BUGFIX: `traceback.formats_exc()` does not exist and would
                        # have raised AttributeError inside this handler.
                        print(traceback.format_exc())
                        print('Error occured in tensorflow during evaluation:', e)
                        break
                    # Append current validation probs to the whole-set array.
                    va_probs_tot = np.concatenate([va_probs_tot, va_probs])
                # NOTE(review): if the va loop broke early, va_probs_tot is shorter
                # than va_labels_tot and the metrics below will raise -- pre-existing.
                va_auc = roc_auc_score(va_labels_tot, va_probs_tot)
                va_prc = average_precision_score(va_labels_tot, va_probs_tot)
                best_val = max(va_prc, best_val)
                best_auroc = max(va_auc, best_auroc)
                print("Epoch "+str(i)+", seen "+str(total_batches)+" total batches. Validating Took "+\
                    "{:.2f}".format(time()-val_t)+\
                    ". OOS, "+str(0)+" hours back: Loss: "+"{:.5f}".format(va_loss)+ \
                    ", AUC: {:.5f}".format(va_auc)+", AUPR: "+"{:.5f}".format(va_prc))
                with open(log_file,'a+') as write_file:
                    write_file.write("val_loss,{},{},{:d},{:.5f},{:.5f},{:.5f}\n".format(dataset['split'], dataset['horizon'], i, loss_, va_prc, va_auc))
                _run.log_scalar('train_loss', loss_, total_batches)
                _run.log_scalar('val_auprc', va_prc, total_batches)
                sys.stdout.flush()
        print("Finishing epoch "+"{:d}".format(i)+", took "+\
            "{:.3f}".format(time()-epoch_start))
    with open(log_file,'a+') as write_file:
        write_file.write("best,{},{},{},{},{:.5f},{:.5f}\n".format(dataset['split'], dataset['horizon'], labels_tr.shape, labels_va.shape, best_val, best_auroc))
    return {'Best Validation AUPRC': best_val}
if __name__ == '__main__':
    # Run the sacred experiment from the command line (dispatches to @ex.main).
    ex.run_commandline()
| [
"tensorflow.meshgrid",
"tensorflow.slice",
"tensorflow.reduce_sum",
"tensorflow.matrix_band_part",
"numpy.abs",
"tensorflow.trainable_variables",
"numpy.sum",
"numpy.floor",
"tensorflow.reshape",
"tensorflow.logging.set_verbosity",
"tensorflow.diag_part",
"tensorflow.multiply",
"tensorflow.C... | [((938, 959), 'sacred.Experiment', 'Experiment', (['"""MGP-TCN"""'], {}), "('MGP-TCN')\n", (948, 959), False, 'from sacred import Experiment\n'), ((1648, 1678), 'numpy.logical_and', 'np.logical_and', (['mask', 'min_mask'], {}), '(mask, min_mask)\n', (1662, 1678), True, 'import numpy as np\n'), ((1753, 1765), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1762, 1765), True, 'import numpy as np\n'), ((2249, 2273), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (2271, 2273), True, 'import tensorflow as tf\n'), ((3028, 3050), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (3048, 3050), True, 'import tensorflow as tf\n'), ((3207, 3231), 'tensorflow.abs', 'tf.abs', (['conv1_kernel_val'], {}), '(conv1_kernel_val)\n', (3213, 3231), True, 'import tensorflow as tf\n'), ((3283, 3305), 'tensorflow.abs', 'tf.abs', (['conv1_bias_val'], {}), '(conv1_bias_val)\n', (3289, 3305), True, 'import tensorflow as tf\n'), ((3328, 3361), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kernel_abs'], {'axis': '(0)'}), '(kernel_abs, axis=0)\n', (3341, 3361), True, 'import tensorflow as tf\n'), ((3502, 3525), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['bias_abs'], {}), '(bias_abs)\n', (3515, 3525), True, 'import tensorflow as tf\n'), ((3547, 3584), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kernel_abs_sum'], {'axis': '(0)'}), '(kernel_abs_sum, axis=0)\n', (3560, 3584), True, 'import tensorflow as tf\n'), ((3697, 3725), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['l1_per_filter'], {}), '(l1_per_filter)\n', (3710, 3725), True, 'import tensorflow as tf\n'), ((4304, 4328), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4326, 4328), True, 'import tensorflow as tf\n'), ((7796, 7811), 'tensorflow.diag', 'tf.diag', (['noises'], {}), '(noises)\n', (7803, 7811), True, 'import tensorflow as tf\n'), ((7826, 7855), 'tensorflow.meshgrid', 'tf.meshgrid', (['ind_kfi', 'ind_kfi'], {}), '(ind_kfi, 
ind_kfi)\n', (7837, 7855), True, 'import tensorflow as tf\n'), ((7959, 7988), 'tensorflow.meshgrid', 'tf.meshgrid', (['ind_kti', 'ind_kti'], {}), '(ind_kti, ind_kti)\n', (7970, 7988), True, 'import tensorflow as tf\n'), ((8070, 8097), 'tensorflow.multiply', 'tf.multiply', (['Kf_big', 'Kt_big'], {}), '(Kf_big, Kt_big)\n', (8081, 8097), True, 'import tensorflow as tf\n'), ((8643, 8664), 'tensorflow.meshgrid', 'tf.meshgrid', (['ind', 'ind'], {}), '(ind, ind)\n', (8654, 8664), True, 'import tensorflow as tf\n'), ((8774, 8797), 'tensorflow.meshgrid', 'tf.meshgrid', (['ind2', 'ind2'], {}), '(ind2, ind2)\n', (8785, 8797), True, 'import tensorflow as tf\n'), ((8880, 8908), 'tensorflow.multiply', 'tf.multiply', (['Kf_big', 'Kxx_big'], {}), '(Kf_big, Kxx_big)\n', (8891, 8908), True, 'import tensorflow as tf\n'), ((9046, 9089), 'tensorflow.meshgrid', 'tf.meshgrid', (['full_f', 'ind_kfi'], {'indexing': '"""ij"""'}), "(full_f, ind_kfi, indexing='ij')\n", (9057, 9089), True, 'import tensorflow as tf\n'), ((9205, 9248), 'tensorflow.meshgrid', 'tf.meshgrid', (['full_x', 'ind_kti'], {'indexing': '"""ij"""'}), "(full_x, ind_kti, indexing='ij')\n", (9216, 9248), True, 'import tensorflow as tf\n'), ((9327, 9355), 'tensorflow.multiply', 'tf.multiply', (['Kf_big', 'Kxt_big'], {}), '(Kf_big, Kxt_big)\n', (9338, 9355), True, 'import tensorflow as tf\n'), ((9392, 9415), 'tensorflow.reshape', 'tf.reshape', (['Yi', '[-1, 1]'], {}), '(Yi, [-1, 1])\n', (9402, 9415), True, 'import tensorflow as tf\n'), ((9428, 9465), 'tensorflow.random_normal', 'tf.random_normal', (['(nx * M, n_mc_smps)'], {}), '((nx * M, n_mc_smps))\n', (9444, 9465), True, 'import tensorflow as tf\n'), ((10988, 11022), 'tensorflow.zeros', 'tf.zeros', (['[0, grid_max, input_dim]'], {}), '([0, grid_max, input_dim])\n', (10996, 11022), True, 'import tensorflow as tf\n'), ((12619, 12633), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (12630, 12633), True, 'import tensorflow as tf\n'), ((17358, 17467), 
'tensorflow.nn.weighted_cross_entropy_with_logits', 'tf.nn.weighted_cross_entropy_with_logits', ([], {'logits': 'tcn_logits', 'targets': 'tiled_labels', 'pos_weight': 'pos_weight'}), '(logits=tcn_logits, targets=\n tiled_labels, pos_weight=pos_weight)\n', (17398, 17467), True, 'import tensorflow as tf\n'), ((17483, 17518), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['all_losses'], {'axis': '(-1)'}), '(all_losses, axis=-1)\n', (17497, 17518), True, 'import tensorflow as tf\n'), ((18241, 18254), 'tensorflow.zeros', 'tf.zeros', (['[0]'], {}), '([0])\n', (18249, 18254), True, 'import tensorflow as tf\n'), ((18590, 18604), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (18601, 18604), True, 'import tensorflow as tf\n'), ((24508, 24522), 'numpy.median', 'np.median', (['arr'], {}), '(arr)\n', (24517, 24522), True, 'import numpy as np\n'), ((26565, 26607), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (26589, 26607), True, 'import tensorflow as tf\n'), ((26989, 27012), 'numpy.max', 'np.max', (['[10, n_mc_smps]'], {}), '([10, n_mc_smps])\n', (26995, 27012), True, 'import numpy as np\n'), ((30615, 30633), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (30631, 30633), False, 'import sys\n'), ((31443, 31468), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['_seed'], {}), '(_seed)\n', (31461, 31468), True, 'import tensorflow as tf\n'), ((31483, 31499), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (31497, 31499), True, 'import tensorflow as tf\n'), ((31554, 31579), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (31564, 31579), True, 'import tensorflow as tf\n'), ((31635, 31666), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (31646, 31666), True, 'import tensorflow as tf\n'), ((32020, 32057), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, None]'], 
{}), "('float', [None, None])\n", (32034, 32057), True, 'import tensorflow as tf\n'), ((32099, 32136), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, None]'], {}), "('float', [None, None])\n", (32113, 32136), True, 'import tensorflow as tf\n'), ((32183, 32221), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (32197, 32221), True, 'import tensorflow as tf\n'), ((32259, 32297), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (32273, 32297), True, 'import tensorflow as tf\n'), ((32331, 32368), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, None]'], {}), "('float', [None, None])\n", (32345, 32368), True, 'import tensorflow as tf\n'), ((32426, 32471), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, None, n_covs]'], {}), "('float', [None, None, n_covs])\n", (32440, 32471), True, 'import tensorflow as tf\n'), ((32536, 32568), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (32550, 32568), True, 'import tensorflow as tf\n'), ((32653, 32685), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (32667, 32685), True, 'import tensorflow as tf\n'), ((32751, 32783), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (32765, 32783), True, 'import tensorflow as tf\n'), ((32854, 32886), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (32868, 32886), True, 'import tensorflow as tf\n'), ((34039, 34061), 'tensorflow.placeholder', 'tf.placeholder', (['"""bool"""'], {}), "('bool')\n", (34053, 34061), True, 'import tensorflow as tf\n'), ((36099, 36153), 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'report_tensor_allocations_upon_oom': '(True)'}), '(report_tensor_allocations_upon_oom=True)\n', (36112, 36153), 
True, 'import tensorflow as tf\n'), ((36213, 36242), 'numpy.arange', 'np.arange', (['(0)', 'Ntr', 'batch_size'], {}), '(0, Ntr, batch_size)\n', (36222, 36242), True, 'import numpy as np\n'), ((36252, 36294), 'numpy.arange', 'np.arange', (['batch_size', '(Ntr + 1)', 'batch_size'], {}), '(batch_size, Ntr + 1, batch_size)\n', (36261, 36294), True, 'import numpy as np\n'), ((36453, 36482), 'numpy.arange', 'np.arange', (['(0)', 'Nva', 'batch_size'], {}), '(0, Nva, batch_size)\n', (36462, 36482), True, 'import numpy as np\n'), ((36495, 36537), 'numpy.arange', 'np.arange', (['batch_size', '(Nva + 1)', 'batch_size'], {}), '(batch_size, Nva + 1, batch_size)\n', (36504, 36537), True, 'import numpy as np\n'), ((36926, 36958), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'None'}), '(max_to_keep=None)\n', (36940, 36958), True, 'import tensorflow as tf\n'), ((5734, 5757), 'tensorflow.exp', 'tf.exp', (['self.log_length'], {}), '(self.log_length)\n', (5740, 5757), True, 'import tensorflow as tf\n'), ((5933, 5956), 'tensorflow.exp', 'tf.exp', (['self.log_noises'], {}), '(self.log_noises)\n', (5939, 5956), True, 'import tensorflow as tf\n'), ((6075, 6116), 'tensorflow.matrix_band_part', 'tf.matrix_band_part', (['self.L_f_init', '(-1)', '(0)'], {}), '(self.L_f_init, -1, 0)\n', (6094, 6116), True, 'import tensorflow as tf\n'), ((6951, 6979), 'numpy.array', 'np.array', (['num_tcn_grid_times'], {}), '(num_tcn_grid_times)\n', (6959, 6979), True, 'import numpy as np\n'), ((7001, 7025), 'numpy.array', 'np.array', (['tcn_grid_times'], {}), '(tcn_grid_times)\n', (7009, 7025), True, 'import numpy as np\n'), ((7737, 7749), 'tensorflow.shape', 'tf.shape', (['Yi'], {}), '(Yi)\n', (7745, 7749), True, 'import tensorflow as tf\n'), ((7905, 7941), 'tensorflow.stack', 'tf.stack', (['(grid_f[0], grid_f[1])', '(-1)'], {}), '((grid_f[0], grid_f[1]), -1)\n', (7913, 7941), True, 'import tensorflow as tf\n'), ((8020, 8056), 'tensorflow.stack', 'tf.stack', (['(grid_t[0], grid_t[1])', 
'(-1)'], {}), '((grid_t[0], grid_t[1]), -1)\n', (8028, 8056), True, 'import tensorflow as tf\n'), ((8126, 8162), 'tensorflow.stack', 'tf.stack', (['(grid_f[0], grid_f[1])', '(-1)'], {}), '((grid_f[0], grid_f[1]), -1)\n', (8134, 8162), True, 'import tensorflow as tf\n'), ((8179, 8199), 'tensorflow.diag_part', 'tf.diag_part', (['DI_big'], {}), '(DI_big)\n', (8191, 8199), True, 'import tensorflow as tf\n'), ((8456, 8468), 'tensorflow.shape', 'tf.shape', (['Xi'], {}), '(Xi)\n', (8464, 8468), True, 'import tensorflow as tf\n'), ((8693, 8725), 'tensorflow.stack', 'tf.stack', (['(grid[0], grid[1])', '(-1)'], {}), '((grid[0], grid[1]), -1)\n', (8701, 8725), True, 'import tensorflow as tf\n'), ((8744, 8756), 'tensorflow.range', 'tf.range', (['nx'], {}), '(nx)\n', (8752, 8756), True, 'import tensorflow as tf\n'), ((8830, 8864), 'tensorflow.stack', 'tf.stack', (['(grid2[0], grid2[1])', '(-1)'], {}), '((grid2[0], grid2[1]), -1)\n', (8838, 8864), True, 'import tensorflow as tf\n'), ((9117, 9153), 'tensorflow.stack', 'tf.stack', (['(grid_1[0], grid_1[1])', '(-1)'], {}), '((grid_1[0], grid_1[1]), -1)\n', (9125, 9153), True, 'import tensorflow as tf\n'), ((9174, 9186), 'tensorflow.range', 'tf.range', (['nx'], {}), '(nx)\n', (9182, 9186), True, 'import tensorflow as tf\n'), ((9279, 9315), 'tensorflow.stack', 'tf.stack', (['(grid_2[0], grid_2[1])', '(-1)'], {}), '((grid_2[0], grid_2[1]), -1)\n', (9287, 9315), True, 'import tensorflow as tf\n'), ((9563, 9578), 'tensorflow.cholesky', 'tf.cholesky', (['Ky'], {}), '(Ky)\n', (9574, 9578), True, 'import tensorflow as tf\n'), ((10965, 10976), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (10973, 10976), True, 'import tensorflow as tf\n'), ((11034, 11045), 'tensorflow.shape', 'tf.shape', (['T'], {}), '(T)\n', (11042, 11045), True, 'import tensorflow as tf\n'), ((14541, 14552), 'tensorflow.shape', 'tf.shape', (['T'], {}), '(T)\n', (14549, 14552), True, 'import tensorflow as tf\n'), ((16467, 16478), 'tensorflow.shape', 'tf.shape', 
(['Z'], {}), '(Z)\n', (16475, 16478), True, 'import tensorflow as tf\n'), ((17284, 17314), 'tensorflow.expand_dims', 'tf.expand_dims', (['labels'], {'axis': '(1)'}), '(labels, axis=1)\n', (17298, 17314), True, 'import tensorflow as tf\n'), ((17316, 17339), 'tensorflow.stack', 'tf.stack', (['[1, T_max, 1]'], {}), '([1, T_max, 1])\n', (17324, 17339), True, 'import tensorflow as tf\n'), ((18907, 18940), 'tensorflow.cast', 'tf.cast', (['correct_pred', 'tf.float32'], {}), '(correct_pred, tf.float32)\n', (18914, 18940), True, 'import tensorflow as tf\n'), ((24544, 24561), 'numpy.abs', 'np.abs', (['(arr - med)'], {}), '(arr - med)\n', (24550, 24561), True, 'import numpy as np\n'), ((31760, 31852), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['learning_rate', 'global_step', 'decay_step', '(0.96)'], {'staircase': '(True)'}), '(learning_rate, global_step, decay_step, 0.96,\n staircase=True)\n', (31786, 31852), True, 'import tensorflow as tf\n'), ((32949, 32960), 'tensorflow.shape', 'tf.shape', (['Y'], {}), '(Y)\n', (32957, 32960), True, 'import tensorflow as tf\n'), ((34545, 34566), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['losses'], {}), '(losses)\n', (34558, 34566), True, 'import tensorflow as tf\n'), ((35184, 35214), 'tensorflow.keras.losses.SquaredHinge', 'tf.keras.losses.SquaredHinge', ([], {}), '()\n', (35212, 35214), True, 'import tensorflow as tf\n'), ((35914, 35947), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (35945, 35947), True, 'import tensorflow as tf\n'), ((36328, 36348), 'numpy.append', 'np.append', (['ends', 'Ntr'], {}), '(ends, Ntr)\n', (36337, 36348), True, 'import numpy as np\n'), ((36577, 36600), 'numpy.append', 'np.append', (['va_ends', 'Nva'], {}), '(va_ends, Nva)\n', (36586, 36600), True, 'import numpy as np\n'), ((37253, 37259), 'time.time', 'time', ([], {}), '()\n', (37257, 37259), False, 'from time import time\n'), ((1609, 1626), 'numpy.sum', 'np.sum', (['(~min_mask)'], 
{}), '(~min_mask)\n', (1615, 1626), True, 'import numpy as np\n'), ((1724, 1737), 'numpy.sum', 'np.sum', (['(~mask)'], {}), '(~mask)\n', (1730, 1737), True, 'import numpy as np\n'), ((4409, 4425), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['w'], {}), '(w)\n', (4422, 4425), True, 'import tensorflow as tf\n'), ((5649, 5690), 'tensorflow.random_normal', 'tf.random_normal', (['[1]'], {'mean': '(1)', 'stddev': '(0.1)'}), '([1], mean=1, stddev=0.1)\n', (5665, 5690), True, 'import tensorflow as tf\n'), ((5840, 5890), 'tensorflow.random_normal', 'tf.random_normal', (['[input_dim]'], {'mean': '(-2)', 'stddev': '(0.1)'}), '([input_dim], mean=-2, stddev=0.1)\n', (5856, 5890), True, 'import tensorflow as tf\n'), ((6025, 6042), 'tensorflow.eye', 'tf.eye', (['input_dim'], {}), '(input_dim)\n', (6031, 6042), True, 'import tensorflow as tf\n'), ((6151, 6172), 'tensorflow.transpose', 'tf.transpose', (['self.Lf'], {}), '(self.Lf)\n', (6163, 6172), True, 'import tensorflow as tf\n'), ((8368, 8378), 'tensorflow.eye', 'tf.eye', (['ny'], {}), '(ny)\n', (8374, 8378), True, 'import tensorflow as tf\n'), ((8592, 8610), 'tensorflow.tile', 'tf.tile', (['[i]', '[nx]'], {}), '([i], [nx])\n', (8599, 8610), True, 'import tensorflow as tf\n'), ((8981, 8999), 'tensorflow.tile', 'tf.tile', (['[i]', '[nx]'], {}), '([i], [nx])\n', (8988, 8999), True, 'import tensorflow as tf\n'), ((9607, 9632), 'tensorflow.cholesky_solve', 'tf.cholesky_solve', (['Ly', 'y_'], {}), '(Ly, y_)\n', (9624, 9632), True, 'import tensorflow as tf\n'), ((11237, 11280), 'tensorflow.slice', 'tf.slice', (['Y', '[i, 0]', '[1, num_obs_values[i]]'], {}), '(Y, [i, 0], [1, num_obs_values[i]])\n', (11245, 11280), True, 'import tensorflow as tf\n'), ((11414, 11456), 'tensorflow.slice', 'tf.slice', (['T', '[i, 0]', '[1, num_obs_times[i]]'], {}), '(T, [i, 0], [1, num_obs_times[i]])\n', (11422, 11456), True, 'import tensorflow as tf\n'), ((11488, 11536), 'tensorflow.slice', 'tf.slice', (['ind_kf', '[i, 0]', '[1, num_obs_values[i]]'], {}), 
'(ind_kf, [i, 0], [1, num_obs_values[i]])\n', (11496, 11536), True, 'import tensorflow as tf\n'), ((11568, 11616), 'tensorflow.slice', 'tf.slice', (['ind_kt', '[i, 0]', '[1, num_obs_values[i]]'], {}), '(ind_kt, [i, 0], [1, num_obs_values[i]])\n', (11576, 11616), True, 'import tensorflow as tf\n'), ((11643, 11690), 'tensorflow.slice', 'tf.slice', (['X', '[i, 0]', '[1, num_tcn_grid_times[i]]'], {}), '(X, [i, 0], [1, num_tcn_grid_times[i]])\n', (11651, 11690), True, 'import tensorflow as tf\n'), ((12233, 12267), 'tensorflow.concat', 'tf.concat', (['[Z, padded_GP_draws]', '(0)'], {}), '([Z, padded_GP_draws], 0)\n', (12242, 12267), True, 'import tensorflow as tf\n'), ((12327, 12369), 'tensorflow.slice', 'tf.slice', (['cov_grid', '[i, 0, 0]', '[1, -1, -1]'], {}), '(cov_grid, [i, 0, 0], [1, -1, -1])\n', (12335, 12369), True, 'import tensorflow as tf\n'), ((12392, 12427), 'tensorflow.tile', 'tf.tile', (['medcovs', '[n_mc_smps, 1, 1]'], {}), '(medcovs, [n_mc_smps, 1, 1])\n', (12399, 12427), True, 'import tensorflow as tf\n'), ((12462, 12508), 'tensorflow.concat', 'tf.concat', (['[padded_GP_draws, tiled_medcovs]', '(2)'], {}), '([padded_GP_draws, tiled_medcovs], 2)\n', (12471, 12508), True, 'import tensorflow as tf\n'), ((12523, 12564), 'tensorflow.concat', 'tf.concat', (['[Z, padded_GPdraws_medcovs]', '(0)'], {}), '([Z, padded_GPdraws_medcovs], 0)\n', (12532, 12564), True, 'import tensorflow as tf\n'), ((14732, 14759), 'tensorflow.orthogonal_initializer', 'tf.orthogonal_initializer', ([], {}), '()\n', (14757, 14759), True, 'import tensorflow as tf\n'), ((16894, 16921), 'tensorflow.orthogonal_initializer', 'tf.orthogonal_initializer', ([], {}), '()\n', (16919, 16921), True, 'import tensorflow as tf\n'), ((17923, 17957), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (17942, 17957), True, 'import tensorflow as tf\n'), ((18841, 18863), 'tensorflow.greater', 'tf.greater', (['probs', '(0.5)'], {}), '(probs, 0.5)\n', 
(18851, 18863), True, 'import tensorflow as tf\n'), ((35043, 35147), 'tensorflow.nn.weighted_cross_entropy_with_logits', 'tf.nn.weighted_cross_entropy_with_logits', ([], {'logits': 'preds', 'targets': 'O_dupe_onehot', 'pos_weight': 'class_imb'}), '(logits=preds, targets=\n O_dupe_onehot, pos_weight=class_imb)\n', (35083, 35147), True, 'import tensorflow as tf\n'), ((35731, 35768), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (35753, 35768), True, 'import tensorflow as tf\n'), ((37560, 37566), 'time.time', 'time', ([], {}), '()\n', (37564, 37566), False, 'from time import time\n'), ((39229, 39247), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (39245, 39247), False, 'import sys\n'), ((6873, 6901), 'numpy.arange', 'np.arange', (['num_tcn_grid_time'], {}), '(num_tcn_grid_time)\n', (6882, 6901), True, 'import numpy as np\n'), ((9788, 9806), 'tensorflow.cholesky', 'tf.cholesky', (['Sigma'], {}), '(Sigma)\n', (9799, 9806), True, 'import tensorflow as tf\n'), ((9859, 9877), 'tensorflow.transpose', 'tf.transpose', (['draw'], {}), '(draw)\n', (9871, 9877), True, 'import tensorflow as tf\n'), ((12884, 12918), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None, None, None]'], {}), '([None, None, None])\n', (12898, 12918), True, 'import tensorflow as tf\n'), ((18034, 18049), 'tensorflow.shape', 'tf.shape', (['preds'], {}), '(preds)\n', (18042, 18049), True, 'import tensorflow as tf\n'), ((18695, 18717), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (18709, 18717), True, 'import tensorflow as tf\n'), ((24631, 24646), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (24638, 24646), True, 'import numpy as np\n'), ((24653, 24667), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (24659, 24667), True, 'import numpy as np\n'), ((24679, 24696), 'numpy.median', 'np.median', (['values'], {}), '(values)\n', (24688, 24696), True, 'import numpy as np\n'), ((33279, 33299), 
'tensorflow.expand_dims', 'tf.expand_dims', (['O', '(1)'], {}), '(O, 1)\n', (33293, 33299), True, 'import tensorflow as tf\n'), ((37165, 37204), 'tensorflow.contrib.memory_stats.MaxBytesInUse', 'tf.contrib.memory_stats.MaxBytesInUse', ([], {}), '()\n', (37202, 37204), True, 'import tensorflow as tf\n'), ((39755, 39761), 'time.time', 'time', ([], {}), '()\n', (39759, 39761), False, 'from time import time\n'), ((39839, 39851), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (39847, 39851), True, 'import numpy as np\n'), ((41166, 41208), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['va_labels_tot', 'va_probs_tot'], {}), '(va_labels_tot, va_probs_tot)\n', (41179, 41208), False, 'from sklearn.metrics import roc_auc_score, average_precision_score\n'), ((41234, 41286), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['va_labels_tot', 'va_probs_tot'], {}), '(va_labels_tot, va_probs_tot)\n', (41257, 41286), False, 'from sklearn.metrics import roc_auc_score, average_precision_score\n'), ((42116, 42134), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (42132, 42134), False, 'import sys\n'), ((6760, 6778), 'numpy.floor', 'np.floor', (['end_time'], {}), '(end_time)\n', (6768, 6778), True, 'import numpy as np\n'), ((10234, 10254), 'tensorflow.matmul', 'tf.matmul', (['K_ff', 'vec'], {}), '(K_ff, vec)\n', (10243, 10254), True, 'import tensorflow as tf\n'), ((10530, 10548), 'tensorflow.transpose', 'tf.transpose', (['draw'], {}), '(draw)\n', (10542, 10548), True, 'import tensorflow as tf\n'), ((12040, 12073), 'tensorflow.zeros', 'tf.zeros', (['(n_mc_smps, pad_len, M)'], {}), '((n_mc_smps, pad_len, M))\n', (12048, 12073), True, 'import tensorflow as tf\n'), ((12151, 12184), 'tensorflow.zeros', 'tf.zeros', (['(n_mc_smps, pad_len, M)'], {}), '((n_mc_smps, pad_len, M))\n', (12159, 12184), True, 'import tensorflow as tf\n'), ((38455, 38477), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (38475, 38477), False, 'import 
traceback\n'), ((41099, 41139), 'numpy.concatenate', 'np.concatenate', (['[va_probs_tot, va_probs]'], {}), '([va_probs_tot, va_probs])\n', (41113, 41139), True, 'import numpy as np\n'), ((9692, 9710), 'tensorflow.transpose', 'tf.transpose', (['K_fy'], {}), '(K_fy)\n', (9704, 9710), True, 'import tensorflow as tf\n'), ((9738, 9752), 'tensorflow.shape', 'tf.shape', (['K_ff'], {}), '(K_ff)\n', (9746, 9752), True, 'import tensorflow as tf\n'), ((18501, 18550), 'tensorflow.slice', 'tf.slice', (['all_probs', '[i * n_mc_smps]', '[n_mc_smps]'], {}), '(all_probs, [i * n_mc_smps], [n_mc_smps])\n', (18509, 18550), True, 'import tensorflow as tf\n'), ((38643, 38676), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': '""".csv"""'}), "(suffix='.csv')\n", (38661, 38676), False, 'from tempfile import NamedTemporaryFile\n'), ((38703, 38733), 'faulthandler.dump_traceback', 'faulthandler.dump_traceback', (['f'], {}), '(f)\n', (38730, 38733), False, 'import faulthandler\n'), ((42423, 42429), 'time.time', 'time', ([], {}), '()\n', (42427, 42429), False, 'from time import time\n'), ((40840, 40863), 'traceback.formats_exc', 'traceback.formats_exc', ([], {}), '()\n', (40861, 40863), False, 'import traceback\n'), ((10293, 10311), 'tensorflow.transpose', 'tf.transpose', (['K_fy'], {}), '(K_fy)\n', (10305, 10311), True, 'import tensorflow as tf\n'), ((38966, 38972), 'time.time', 'time', ([], {}), '()\n', (38970, 38972), False, 'from time import time\n'), ((41534, 41540), 'time.time', 'time', ([], {}), '()\n', (41538, 41540), False, 'from time import time\n')] |
import numpy as np
import json
import sys
import os
import activations
import mnist
import utils
# Stop on RuntimeWarning during matrix processing : prevent silent overflows
np.seterr(all='raise')
def feed_forward(X_input: np.ndarray, weights: list, activation_fn: list) -> np.ndarray:
    """Feed forward the network and return the activations of every layer.

    X_input => input layer (batch of samples, one per row)
    weights => weight matrix of every layer
    activation_fn => activation function of every layer (same length as weights)

    Returns the list ``[X_input, y_1, ..., y_L]``: the input followed by the
    activated output of each layer (kept for backpropagation).
    """
    x = [X_input]
    # Forward loop over the layers
    # (loop index renamed from `id`, which shadowed the builtin)
    for i, w in enumerate(weights):
        # Weighted average `z = x · w`
        z = x[-1].dot(w)
        # Activation function `y = g(z)`
        y = activation_fn[i](z)
        # Append `y` to previous layers
        x.append(y)
    return x
def grads(x: np.ndarray, y_expected: np.ndarray, weights: list, activations_fn: list, activations_prime: list) -> np.ndarray:
    """Calculate weight gradients with backward propagation.

    x => input layer (batch of samples)
    y_expected => expected output layer for the batch
    weights => weights of every layer
    activations_fn => activation function of every layer
    activations_prime => derivative of each activation function

    Returns ``(gradients / len(x), cost)``: the per-layer weight gradients
    averaged over the batch, and the cost of the batch.
    """
    # Forward propagation to catch network datas
    y = feed_forward(x, weights, activations_fn)
    # Calculate global (output) error of the network
    delta = y[-1] - y_expected
    # Cost reported to the caller.
    # NOTE(review): this divides by len(y), the number of layers, not the
    # batch size — presumably meant as an average over samples; confirm.
    cost = 1/len(y) * np.sum((y[-1] - y_expected) ** 2)
    # Gradient of the output weights layer
    # (local renamed from `grads`, which shadowed this function's own name)
    gradients: np.ndarray = np.empty_like(weights)
    gradients[-1] = y[-2].T.dot(delta)
    # Backward loop over the hidden layers
    for i in range(len(y)-2, 0, -1):
        # Calculate error of each layer
        delta = delta.dot(weights[i].T) * activations_prime[i](y[i])
        # Calculate errors of weights
        gradients[i-1] = y[i-1].T.dot(delta)
    # Average the gradients over the batch size
    return gradients / len(x), cost
def train(weights: list, trainX: np.ndarray, trainY: np.ndarray, testX: np.ndarray, testY: np.ndarray, activations_fn: list, activations_prime: list, filename: str, epochs: int, batch: int, learning_rate: float, save_timeout: int, graph: bool, no_infos: bool, reduce_output: int) -> tuple:
    """Train the network with mini-batch gradient descent.

    weights => initial weights of every layer (corrected each batch)
    trainX, trainY => training inputs and one-hot labels
    testX, testY => test inputs and one-hot labels, used to measure accuracy
    activations_fn, activations_prime => activation functions and derivatives
    filename => base name for saved networks (falsy value disables saving)
    epochs => number of epochs (a negative value means "virtually unlimited")
    batch => mini-batch size
    learning_rate => gradient descent step size
    save_timeout => save a temporary network every `save_timeout` epochs
    graph => plot accuracy and cost evolution at the end
    no_infos => skip writing training infos in the saved files
    reduce_output => verbosity level (0 = progress bar, 3 = silent)

    Returns ``((accuracy_table, average_cost_table), weights)``.
    """
    path = os.path.dirname(__file__)
    accuracy_table = []
    average_cost_table = []
    # Make prediction with the untrained network (epoch 0 baseline)
    prediction = np.argmax(feed_forward(
        testX, weights, activations_fn)[-1], axis=1)
    accuracy = np.mean(prediction == np.argmax(testY, axis=1))
    accuracy_table.append(accuracy)
    initial_cost = 1/len(testY) * np.sum((prediction -
                                           np.argmax(testY, axis=1)) ** 2)
    average_cost_table.append(initial_cost)
    if reduce_output <= 1:
        print('Accuracy at epoch 0 :', accuracy, ' cost =', initial_cost)
    elif reduce_output == 2:
        print(0, accuracy, initial_cost)
    if epochs < 0:
        epochs = 99999999999
    # Epochs loop
    for i in range(epochs):
        cost_table = []
        if reduce_output < 1:
            try:
                from tqdm import tqdm
            except ImportError:
                print('Cannot find module `tqdm`!\nInstall it with `pip3 install tqdm` (or equivalent), or run the program with the argument `-r`.')
                exit(1)
            pbar = tqdm(range(0, len(trainX), batch))
        else:
            pbar = range(0, len(trainX), batch)
        # Batches loop
        for j in pbar:
            if reduce_output < 1:
                pbar.set_description("Processing epoch %s" % (i+1))
            # Select training data
            X, Y = trainX[j:j+batch], trainY[j:j+batch]
            # Correct the network
            grad, cost = grads(
                X, Y, weights, activations_fn, activations_prime)
            weights -= learning_rate * grad
            cost_table.append(cost)
        average_cost = np.mean(cost_table)
        average_cost_table.append(average_cost)
        # Make prediction for epoch
        prediction = np.argmax(feed_forward(
            testX, weights, activations_fn)[-1], axis=1)
        accuracy = np.mean(prediction == np.argmax(testY, axis=1))
        accuracy_table.append(accuracy)
        if reduce_output < 2:
            print('Accuracy at epoch', i+1, ':',
                  accuracy, ' cost =', average_cost)
        if reduce_output == 2:
            print(i+1, accuracy, average_cost)
        # Save temp file if set so
        if filename and save_timeout > 0 and i % save_timeout == 0:
            temp_filename = '../trains/temp/' + \
                filename + '_epoch_' + str(i) + '.npz'
            temp_filename = os.path.join(path, temp_filename)
            infos = [accuracy, learning_rate, i, batch]
            utils.save(weights, activations_fn,
                       temp_filename, no_infos, infos, reduce_output)
    # Show plot of accuracy and cost
    if graph:
        print('Plotting training evolution...', end=' ', flush=True)
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            print('Cannot find module `matplotlib`!\nInstall it with `pip3 install matplotlib` (or equivalent), or run the program with the argument `-g`.')
            exit(1)
        # Both tables hold epochs + 1 values (epoch-0 baseline plus one per
        # epoch), so both must be plotted against range(0, epochs+1);
        # plotting the cost against range(1, epochs+1) raised a matplotlib
        # length-mismatch ValueError.
        plt.plot(range(0, epochs+1), average_cost_table, label='cost')
        plt.plot(range(0, epochs+1), accuracy_table, label='accuracy')
        plt.xlim(0, epochs)
        plt.ylim(0)
        plt.grid(axis='both', linestyle=':')
        plt.xlabel('Epoch number', fontsize=11)
        plt.legend()
        plt.show()
        plt.close()
        print('done !')
    # Save final file
    if filename:
        filename = os.path.join(path, '../trains/' + filename + '.npz')
        infos = [accuracy, learning_rate, epochs, batch]
        utils.save(weights, activations_fn, filename,
                   no_infos, infos, reduce_output)
    return (accuracy_table, average_cost_table), weights
def runTrain(params: dict, architecture: list, file=None) -> dict:
    """Decode the JSON training parameters, build a random network and train it.

    params => JSON string holding the training hyper-parameters
    architecture => list of weight-matrix shapes, one per layer
    file => optional base name used to save the trained network
    """
    config: dict = json.loads(params)
    epochs: int = config['epochs']
    batch: int = config['batch']
    learning_rate: float = config['learning_rate']
    save_timeout: int = config['save_timeout']
    graph: bool = config['graph']
    no_infos: bool = config['no_infos']
    reduce_output: int = config['reduce_output']
    activations_arch, primes_arch = activations.listToActivations(
        config['activations'], architecture)
    # Display a summary of the network before training
    if reduce_output < 1:
        utils.print_network_visualization(
            architecture, activations_arch, epochs, batch, learning_rate)
    # Load the MNIST dataset
    trX, trY, teX, teY = mnist.load_data()
    # Randomly initialize small weights for every layer
    weights = [np.random.randn(*shape) * 0.1 for shape in architecture]
    # Launch the training and return its history
    history, weights = train(weights, trX, trY, teX, teY, activations_arch, primes_arch, file,
                             epochs, batch, learning_rate, save_timeout, graph, no_infos, reduce_output)
    return history
| [
"numpy.sum",
"numpy.argmax",
"numpy.mean",
"os.path.join",
"activations.listToActivations",
"utils.save",
"mnist.load_data",
"json.loads",
"utils.print_network_visualization",
"numpy.random.randn",
"matplotlib.pyplot.close",
"os.path.dirname",
"numpy.empty_like",
"matplotlib.pyplot.show",
... | [((176, 198), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (185, 198), True, 'import numpy as np\n'), ((1648, 1670), 'numpy.empty_like', 'np.empty_like', (['weights'], {}), '(weights)\n', (1661, 1670), True, 'import numpy as np\n'), ((2296, 2321), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2311, 2321), False, 'import os\n'), ((6189, 6207), 'json.loads', 'json.loads', (['params'], {}), '(params)\n', (6199, 6207), False, 'import json\n'), ((6533, 6599), 'activations.listToActivations', 'activations.listToActivations', (["params['activations']", 'architecture'], {}), "(params['activations'], architecture)\n", (6562, 6599), False, 'import activations\n'), ((6829, 6846), 'mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (6844, 6846), False, 'import mnist\n'), ((1543, 1576), 'numpy.sum', 'np.sum', (['((y[-1] - y_expected) ** 2)'], {}), '((y[-1] - y_expected) ** 2)\n', (1549, 1576), True, 'import numpy as np\n'), ((3969, 3988), 'numpy.mean', 'np.mean', (['cost_table'], {}), '(cost_table)\n', (3976, 3988), True, 'import numpy as np\n'), ((5550, 5569), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'epochs'], {}), '(0, epochs)\n', (5558, 5569), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5589), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)'], {}), '(0)\n', (5586, 5589), True, 'import matplotlib.pyplot as plt\n'), ((5598, 5634), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""both"""', 'linestyle': '""":"""'}), "(axis='both', linestyle=':')\n", (5606, 5634), True, 'import matplotlib.pyplot as plt\n'), ((5643, 5682), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch number"""'], {'fontsize': '(11)'}), "('Epoch number', fontsize=11)\n", (5653, 5682), True, 'import matplotlib.pyplot as plt\n'), ((5691, 5703), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5701, 5703), True, 'import matplotlib.pyplot as plt\n'), ((5712, 5722), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (5720, 5722), True, 'import matplotlib.pyplot as plt\n'), ((5731, 5742), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5740, 5742), True, 'import matplotlib.pyplot as plt\n'), ((5826, 5878), 'os.path.join', 'os.path.join', (['path', "('../trains/' + filename + '.npz')"], {}), "(path, '../trains/' + filename + '.npz')\n", (5838, 5878), False, 'import os\n'), ((5946, 6023), 'utils.save', 'utils.save', (['weights', 'activations_fn', 'filename', 'no_infos', 'infos', 'reduce_output'], {}), '(weights, activations_fn, filename, no_infos, infos, reduce_output)\n', (5956, 6023), False, 'import utils\n'), ((6678, 6777), 'utils.print_network_visualization', 'utils.print_network_visualization', (['architecture', 'activations_arch', 'epochs', 'batch', 'learning_rate'], {}), '(architecture, activations_arch, epochs,\n batch, learning_rate)\n', (6711, 6777), False, 'import utils\n'), ((2555, 2579), 'numpy.argmax', 'np.argmax', (['testY'], {'axis': '(1)'}), '(testY, axis=1)\n', (2564, 2579), True, 'import numpy as np\n'), ((6882, 6901), 'numpy.random.randn', 'np.random.randn', (['*w'], {}), '(*w)\n', (6897, 6901), True, 'import numpy as np\n'), ((4217, 4241), 'numpy.argmax', 'np.argmax', (['testY'], {'axis': '(1)'}), '(testY, axis=1)\n', (4226, 4241), True, 'import numpy as np\n'), ((2715, 2739), 'numpy.argmax', 'np.argmax', (['testY'], {'axis': '(1)'}), '(testY, axis=1)\n', (2724, 2739), True, 'import numpy as np\n'), ((4783, 4816), 'os.path.join', 'os.path.join', (['path', 'temp_filename'], {}), '(path, temp_filename)\n', (4795, 4816), False, 'import os\n'), ((4903, 4989), 'utils.save', 'utils.save', (['weights', 'activations_fn', 'temp_filename', 'no_infos', 'infos', 'reduce_output'], {}), '(weights, activations_fn, temp_filename, no_infos, infos,\n reduce_output)\n', (4913, 4989), False, 'import utils\n')] |
'''
REDS dataset
support reading images from lmdb, image folder and memcached
'''
import os.path as osp
import random
import pickle
import logging
import numpy as np
import cv2
import lmdb
import torch
import torch.utils.data as data
import data.util as util
try:
import mc # import memcached
except ImportError:
pass
logger = logging.getLogger('base')
class REDSDataset(data.Dataset):
    '''
    Reading the training REDS dataset
    key example: 000_00000000
    GT: Ground-Truth;
    LQ: Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames
    support reading N LQ frames, N = 1, 3, 5, 7
    '''
    def __init__(self, opt):
        """Store the dataset options and load the list of GT image keys.

        opt: dict of dataset options; this code reads 'interval_list',
        'random_reverse', 'N_frames', 'dataroot_GT', 'dataroot_LQ',
        'data_type', 'GT_size', 'LQ_size' and 'cache_keys'.
        """
        super(REDSDataset, self).__init__()
        self.opt = opt
        # temporal augmentation
        self.interval_list = opt['interval_list']
        self.random_reverse = opt['random_reverse']
        logger.info('Temporal augmentation interval list: [{}], with random reverse is {}.'.format(
            ','.join(str(x) for x in opt['interval_list']), self.random_reverse))
        # number of neighbor frames on each side of the center frame
        self.half_N_frames = opt['N_frames'] // 2
        self.GT_root, self.LQ_root = opt['dataroot_GT'], opt['dataroot_LQ']
        self.data_type = self.opt['data_type']
        self.LR_input = False if opt['GT_size'] == opt['LQ_size'] else True  # low resolution inputs
        #### directly load image keys
        if self.data_type == 'lmdb':
            self.paths_GT, _ = util.get_image_paths(self.data_type, opt['dataroot_GT'])
            logger.info('Using lmdb meta info for cache keys.')
        elif opt['cache_keys']:
            logger.info('Using cache keys: {}'.format(opt['cache_keys']))
            self.paths_GT = pickle.load(open(opt['cache_keys'], 'rb'))['keys']
        else:
            raise ValueError(
                'Need to create cache keys (meta_info.pkl) by running [create_lmdb.py]')
        # remove the REDS4 for testing (clips 000, 011, 015, 020)
        self.paths_GT = [
            v for v in self.paths_GT if v.split('_')[0] not in ['000', '011', '015', '020']
        ]
        assert self.paths_GT, 'Error: GT path is empty.'
        # lmdb environments / memcached client are opened lazily per worker
        if self.data_type == 'lmdb':
            self.GT_env, self.LQ_env = None, None
        elif self.data_type == 'mc':  # memcached
            self.mclient = None
        elif self.data_type == 'img':
            pass
        else:
            raise ValueError('Wrong data type: {}'.format(self.data_type))
    def _init_lmdb(self):
        """Open the GT and LQ lmdb environments (deferred to the worker).

        Opening lazily avoids sharing lmdb handles across forked workers:
        https://github.com/chainer/chainermn/issues/129
        """
        self.GT_env = lmdb.open(self.opt['dataroot_GT'], readonly=True, lock=False, readahead=False,
                               meminit=False)
        self.LQ_env = lmdb.open(self.opt['dataroot_LQ'], readonly=True, lock=False, readahead=False,
                               meminit=False)
    def _ensure_memcached(self):
        """Create the memcached client on first use."""
        if self.mclient is None:
            # specify the config files
            # NOTE(review): both config paths are None here — presumably they
            # must be filled in for a real cluster deployment; confirm.
            server_list_config_file = None
            client_config_file = None
            self.mclient = mc.MemcachedClient.GetInstance(server_list_config_file,
                                                           client_config_file)
    def _read_img_mc(self, path):
        ''' Return BGR, HWC, [0, 255], uint8'''
        value = mc.pyvector()
        self.mclient.Get(path, value)
        value_buf = mc.ConvertBuffer(value)
        img_array = np.frombuffer(value_buf, np.uint8)
        img = cv2.imdecode(img_array, cv2.IMREAD_UNCHANGED)
        return img
    def _read_img_mc_BGR(self, path, name_a, name_b):
        ''' Read BGR channels separately and then combine for 1M limits in cluster'''
        img_B = self._read_img_mc(osp.join(path + '_B', name_a, name_b + '.png'))
        img_G = self._read_img_mc(osp.join(path + '_G', name_a, name_b + '.png'))
        img_R = self._read_img_mc(osp.join(path + '_R', name_a, name_b + '.png'))
        # merge the three single-channel reads back into one BGR image
        img = cv2.merge((img_B, img_G, img_R))
        return img
    def __getitem__(self, index):
        """Return one sample as {'LQs': (N, C, H, W) tensor, 'GT': (C, H, W)
        tensor, 'key': str}, with random temporal/spatial augmentation in
        the 'train' phase."""
        # lazily open the backend for this worker
        if self.data_type == 'mc':
            self._ensure_memcached()
        elif self.data_type == 'lmdb' and (self.GT_env is None or self.LQ_env is None):
            self._init_lmdb()
        scale = self.opt['scale']
        GT_size = self.opt['GT_size']
        key = self.paths_GT[index]
        name_a, name_b = key.split('_')
        center_frame_idx = int(name_b)
        #### determine the neighbor frames
        # random temporal stride between consecutive sampled frames
        interval = random.choice(self.interval_list)
        if self.opt['border_mode']:
            direction = 1  # 1: forward; 0: backward
            N_frames = self.opt['N_frames']
            if self.random_reverse and random.random() < 0.5:
                direction = random.choice([0, 1])
            # frame indices run 0..99; flip direction if we would overflow
            if center_frame_idx + interval * (N_frames - 1) > 99:
                direction = 0
            elif center_frame_idx - interval * (N_frames - 1) < 0:
                direction = 1
            # get the neighbor list
            if direction == 1:
                neighbor_list = list(
                    range(center_frame_idx, center_frame_idx + interval * N_frames, interval))
            else:
                neighbor_list = list(
                    range(center_frame_idx, center_frame_idx - interval * N_frames, -interval))
            # the GT frame is the first frame of the sequence in border mode
            name_b = '{:08d}'.format(neighbor_list[0])
        else:
            # ensure not exceeding the borders: resample the center until the
            # whole window [center - half*interval, center + half*interval]
            # fits inside 0..99
            while (center_frame_idx + self.half_N_frames * interval >
                   99) or (center_frame_idx - self.half_N_frames * interval < 0):
                center_frame_idx = random.randint(0, 99)
            # get the neighbor list
            neighbor_list = list(
                range(center_frame_idx - self.half_N_frames * interval,
                      center_frame_idx + self.half_N_frames * interval + 1, interval))
            if self.random_reverse and random.random() < 0.5:
                neighbor_list.reverse()
            # the GT frame is the center of the (possibly reversed) window
            name_b = '{:08d}'.format(neighbor_list[self.half_N_frames])
        assert len(
            neighbor_list) == self.opt['N_frames'], 'Wrong length of neighbor list: {}'.format(
                len(neighbor_list))
        #### get the GT image (as the center frame)
        if self.data_type == 'mc':
            img_GT = self._read_img_mc_BGR(self.GT_root, name_a, name_b)
            img_GT = img_GT.astype(np.float32) / 255.
        elif self.data_type == 'lmdb':
            img_GT = util.read_img(self.GT_env, key, (3, 2160, 3840))
            #img_GT = util.read_img(self.GT_env, key, (3, 720, 1280))
        else:
            img_GT = util.read_img(None, osp.join(self.GT_root, name_a, name_b + '.png'))
        #### get LQ images
        # (C, H, W) of the LQ frames; depends on whether inputs are downscaled
        LQ_size_tuple = (3, 540, 960) if self.LR_input else (3, 2160, 3840)
        #LQ_size_tuple = (3, 180, 320) if self.LR_input else (3, 720, 1280)
        img_LQ_l = []
        for v in neighbor_list:
            img_LQ_path = osp.join(self.LQ_root, name_a, '{:08d}.png'.format(v))
            if self.data_type == 'mc':
                if self.LR_input:
                    img_LQ = self._read_img_mc(img_LQ_path)
                else:
                    img_LQ = self._read_img_mc_BGR(self.LQ_root, name_a, '{:08d}'.format(v))
                img_LQ = img_LQ.astype(np.float32) / 255.
            elif self.data_type == 'lmdb':
                img_LQ = util.read_img(self.LQ_env, '{}_{:08d}'.format(name_a, v), LQ_size_tuple)
            else:
                img_LQ = util.read_img(None, img_LQ_path)
            img_LQ_l.append(img_LQ)
        if self.opt['phase'] == 'train':
            C, H, W = LQ_size_tuple  # LQ size
            # randomly crop a GT_size patch (and the matching LQ patch)
            if self.LR_input:
                LQ_size = GT_size // scale
                rnd_h = random.randint(0, max(0, H - LQ_size))
                rnd_w = random.randint(0, max(0, W - LQ_size))
                img_LQ_l = [v[rnd_h:rnd_h + LQ_size, rnd_w:rnd_w + LQ_size, :] for v in img_LQ_l]
                # scale the LQ crop origin up to GT coordinates
                rnd_h_HR, rnd_w_HR = int(rnd_h * scale), int(rnd_w * scale)
                img_GT = img_GT[rnd_h_HR:rnd_h_HR + GT_size, rnd_w_HR:rnd_w_HR + GT_size, :]
            else:
                rnd_h = random.randint(0, max(0, H - GT_size))
                rnd_w = random.randint(0, max(0, W - GT_size))
                img_LQ_l = [v[rnd_h:rnd_h + GT_size, rnd_w:rnd_w + GT_size, :] for v in img_LQ_l]
                img_GT = img_GT[rnd_h:rnd_h + GT_size, rnd_w:rnd_w + GT_size, :]
            # augmentation - flip, rotate (GT appended so it is augmented
            # identically to the LQ frames, then split off again)
            img_LQ_l.append(img_GT)
            rlt = util.augment(img_LQ_l, self.opt['use_flip'], self.opt['use_rot'])
            img_LQ_l = rlt[0:-1]
            img_GT = rlt[-1]
        # stack LQ images to NHWC, N is the frame number
        img_LQs = np.stack(img_LQ_l, axis=0)
        # BGR to RGB, HWC to CHW, numpy to tensor
        img_GT = img_GT[:, :, [2, 1, 0]]
        img_LQs = img_LQs[:, :, :, [2, 1, 0]]
        img_GT = torch.from_numpy(np.ascontiguousarray(np.transpose(img_GT, (2, 0, 1)))).float()
        img_LQs = torch.from_numpy(np.ascontiguousarray(np.transpose(img_LQs,
                                                                     (0, 3, 1, 2)))).float()
        return {'LQs': img_LQs, 'GT': img_GT, 'key': key}
    def __len__(self):
        """Return the number of GT keys (one sample per key)."""
        return len(self.paths_GT)
| [
"numpy.stack",
"data.util.get_image_paths",
"random.randint",
"mc.pyvector",
"numpy.frombuffer",
"data.util.read_img",
"cv2.imdecode",
"data.util.augment",
"random.choice",
"numpy.transpose",
"mc.MemcachedClient.GetInstance",
"random.random",
"mc.ConvertBuffer",
"lmdb.open",
"cv2.merge",... | [((337, 362), 'logging.getLogger', 'logging.getLogger', (['"""base"""'], {}), "('base')\n", (354, 362), False, 'import logging\n'), ((2504, 2602), 'lmdb.open', 'lmdb.open', (["self.opt['dataroot_GT']"], {'readonly': '(True)', 'lock': '(False)', 'readahead': '(False)', 'meminit': '(False)'}), "(self.opt['dataroot_GT'], readonly=True, lock=False, readahead=\n False, meminit=False)\n", (2513, 2602), False, 'import lmdb\n'), ((2652, 2750), 'lmdb.open', 'lmdb.open', (["self.opt['dataroot_LQ']"], {'readonly': '(True)', 'lock': '(False)', 'readahead': '(False)', 'meminit': '(False)'}), "(self.opt['dataroot_LQ'], readonly=True, lock=False, readahead=\n False, meminit=False)\n", (2661, 2750), False, 'import lmdb\n'), ((3225, 3238), 'mc.pyvector', 'mc.pyvector', ([], {}), '()\n', (3236, 3238), False, 'import mc\n'), ((3297, 3320), 'mc.ConvertBuffer', 'mc.ConvertBuffer', (['value'], {}), '(value)\n', (3313, 3320), False, 'import mc\n'), ((3341, 3375), 'numpy.frombuffer', 'np.frombuffer', (['value_buf', 'np.uint8'], {}), '(value_buf, np.uint8)\n', (3354, 3375), True, 'import numpy as np\n'), ((3390, 3435), 'cv2.imdecode', 'cv2.imdecode', (['img_array', 'cv2.IMREAD_UNCHANGED'], {}), '(img_array, cv2.IMREAD_UNCHANGED)\n', (3402, 3435), False, 'import cv2\n'), ((3856, 3888), 'cv2.merge', 'cv2.merge', (['(img_B, img_G, img_R)'], {}), '((img_B, img_G, img_R))\n', (3865, 3888), False, 'import cv2\n'), ((4383, 4416), 'random.choice', 'random.choice', (['self.interval_list'], {}), '(self.interval_list)\n', (4396, 4416), False, 'import random\n'), ((8667, 8693), 'numpy.stack', 'np.stack', (['img_LQ_l'], {'axis': '(0)'}), '(img_LQ_l, axis=0)\n', (8675, 8693), True, 'import numpy as np\n'), ((1419, 1475), 'data.util.get_image_paths', 'util.get_image_paths', (['self.data_type', "opt['dataroot_GT']"], {}), "(self.data_type, opt['dataroot_GT'])\n", (1439, 1475), True, 'import data.util as util\n'), ((2992, 3067), 'mc.MemcachedClient.GetInstance', 
'mc.MemcachedClient.GetInstance', (['server_list_config_file', 'client_config_file'], {}), '(server_list_config_file, client_config_file)\n', (3022, 3067), False, 'import mc\n'), ((3630, 3676), 'os.path.join', 'osp.join', (["(path + '_B')", 'name_a', "(name_b + '.png')"], {}), "(path + '_B', name_a, name_b + '.png')\n", (3638, 3676), True, 'import os.path as osp\n'), ((3712, 3758), 'os.path.join', 'osp.join', (["(path + '_G')", 'name_a', "(name_b + '.png')"], {}), "(path + '_G', name_a, name_b + '.png')\n", (3720, 3758), True, 'import os.path as osp\n'), ((3794, 3840), 'os.path.join', 'osp.join', (["(path + '_R')", 'name_a', "(name_b + '.png')"], {}), "(path + '_R', name_a, name_b + '.png')\n", (3802, 3840), True, 'import os.path as osp\n'), ((8463, 8528), 'data.util.augment', 'util.augment', (['img_LQ_l', "self.opt['use_flip']", "self.opt['use_rot']"], {}), "(img_LQ_l, self.opt['use_flip'], self.opt['use_rot'])\n", (8475, 8528), True, 'import data.util as util\n'), ((4640, 4661), 'random.choice', 'random.choice', (['[0, 1]'], {}), '([0, 1])\n', (4653, 4661), False, 'import random\n'), ((5510, 5531), 'random.randint', 'random.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (5524, 5531), False, 'import random\n'), ((6363, 6411), 'data.util.read_img', 'util.read_img', (['self.GT_env', 'key', '(3, 2160, 3840)'], {}), '(self.GT_env, key, (3, 2160, 3840))\n', (6376, 6411), True, 'import data.util as util\n'), ((4589, 4604), 'random.random', 'random.random', ([], {}), '()\n', (4602, 4604), False, 'import random\n'), ((5800, 5815), 'random.random', 'random.random', ([], {}), '()\n', (5813, 5815), False, 'import random\n'), ((6537, 6584), 'os.path.join', 'osp.join', (['self.GT_root', 'name_a', "(name_b + '.png')"], {}), "(self.GT_root, name_a, name_b + '.png')\n", (6545, 6584), True, 'import os.path as osp\n'), ((7391, 7423), 'data.util.read_img', 'util.read_img', (['None', 'img_LQ_path'], {}), '(None, img_LQ_path)\n', (7404, 7423), True, 'import data.util as util\n'), 
((8886, 8917), 'numpy.transpose', 'np.transpose', (['img_GT', '(2, 0, 1)'], {}), '(img_GT, (2, 0, 1))\n', (8898, 8917), True, 'import numpy as np\n'), ((8984, 9019), 'numpy.transpose', 'np.transpose', (['img_LQs', '(0, 3, 1, 2)'], {}), '(img_LQs, (0, 3, 1, 2))\n', (8996, 9019), True, 'import numpy as np\n')] |
from collections import OrderedDict
from io import StringIO
from itertools import islice
import os
from typing import Any, Callable, Optional, Type
import numpy as np
import pandas._libs.json as json
from pandas._libs.tslibs import iNaT
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.common import ensure_str, is_period_dtype
from pandas import DataFrame, MultiIndex, Series, isna, to_datetime
from pandas._typing import JSONSerializable
from pandas.core.reshape.concat import concat
from pandas.io.common import (
BaseIterator,
_get_handle,
_infer_compression,
_stringify_path,
get_filepath_or_buffer,
)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import _validate_integer
from ._normalize import convert_to_line_delimits
from ._table_schema import build_table_schema, parse_table_schema
loads = json.loads
dumps = json.dumps
TABLE_SCHEMA_VERSION = "0.20.0"
# interface to/from
def to_json(
    path_or_buf,
    obj,
    orient: Optional[str] = None,
    date_format: str = "epoch",
    double_precision: int = 10,
    force_ascii: bool = True,
    date_unit: str = "ms",
    default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
    lines: bool = False,
    compression: Optional[str] = "infer",
    index: bool = True,
    indent: int = 0,
):
    """
    Serialize a Series or DataFrame to JSON.

    Writes to ``path_or_buf`` (path or open file-like); returns the JSON
    string instead when ``path_or_buf`` is None.
    """
    # index=False can only be honoured by orients that do not key rows
    # by their index labels.
    if not index and orient not in ("split", "table"):
        raise ValueError(
            "'index=False' is only valid when 'orient' is 'split' or 'table'"
        )
    path_or_buf = _stringify_path(path_or_buf)
    # Line-delimited output is only defined for the 'records' orient.
    if lines and orient != "records":
        raise ValueError("'lines' keyword only valid when 'orient' is records")
    # orient='table' needs a frame so that a Table Schema can be built.
    if orient == "table" and isinstance(obj, Series):
        obj = obj.to_frame(name=obj.name or "values")
    # Pick the writer class matching the object / orient combination.
    if orient == "table" and isinstance(obj, DataFrame):
        writer_cls = JSONTableWriter  # type: Type["Writer"]
    elif isinstance(obj, Series):
        writer_cls = SeriesWriter
    elif isinstance(obj, DataFrame):
        writer_cls = FrameWriter
    else:
        raise NotImplementedError("'obj' should be a Series or a DataFrame")
    serialized = writer_cls(
        obj,
        orient=orient,
        date_format=date_format,
        double_precision=double_precision,
        ensure_ascii=force_ascii,
        date_unit=date_unit,
        default_handler=default_handler,
        index=index,
        indent=indent,
    ).write()
    if lines:
        serialized = convert_to_line_delimits(serialized)
    if isinstance(path_or_buf, str):
        # A plain path: open the (possibly compressed) handle ourselves and
        # make sure it is closed again.
        fh, _ = _get_handle(path_or_buf, "w", compression=compression)
        try:
            fh.write(serialized)
        finally:
            fh.close()
    elif path_or_buf is None:
        return serialized
    else:
        # Assume an already-open, writable file-like object.
        path_or_buf.write(serialized)
class Writer:
    """Base class that serializes a pandas object to JSON via ``dumps``."""

    def __init__(
        self,
        obj,
        orient: Optional[str],
        date_format: str,
        double_precision: int,
        ensure_ascii: bool,
        date_unit: str,
        index: bool,
        default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
        indent: int = 0,
    ):
        self.obj = obj
        # Each concrete writer supplies a fallback orient.
        if orient is None:
            orient = self._default_orient  # type: ignore
        self.orient = orient
        self.index = index
        self.date_format = date_format
        self.date_unit = date_unit
        self.double_precision = double_precision
        self.ensure_ascii = ensure_ascii
        self.default_handler = default_handler
        self.indent = indent
        # Legacy attribute; not consulted anywhere in this module.
        self.is_copy = None
        self._format_axes()

    def _format_axes(self):
        # Validation hook: concrete writers enforce their axis requirements.
        raise AbstractMethodError(self)

    def write(self):
        """Serialize ``self.obj`` using the configured options."""
        iso_dates = self.date_format == "iso"
        return self._write(
            self.obj,
            self.orient,
            self.double_precision,
            self.ensure_ascii,
            self.date_unit,
            iso_dates,
            self.default_handler,
            self.indent,
        )

    def _write(
        self,
        obj,
        orient: Optional[str],
        double_precision: int,
        ensure_ascii: bool,
        date_unit: str,
        iso_dates: bool,
        default_handler: Optional[Callable[[Any], JSONSerializable]],
        indent: int,
    ):
        # Hand off to the C-accelerated serializer.
        return dumps(
            obj,
            orient=orient,
            double_precision=double_precision,
            ensure_ascii=ensure_ascii,
            date_unit=date_unit,
            iso_dates=iso_dates,
            default_handler=default_handler,
            indent=indent,
        )
class SeriesWriter(Writer):
    """Writer for Series objects; defaults to orient='index'."""

    _default_orient = "index"

    def _format_axes(self):
        # orient='index' keys the output by index labels, so duplicates
        # would silently drop data.
        if self.orient == "index" and not self.obj.index.is_unique:
            raise ValueError(
                "Series index must be unique for orient='{orient}'".format(
                    orient=self.orient
                )
            )

    def _write(
        self,
        obj,
        orient: Optional[str],
        double_precision: int,
        ensure_ascii: bool,
        date_unit: str,
        iso_dates: bool,
        default_handler: Optional[Callable[[Any], JSONSerializable]],
        indent: int,
    ):
        # With index=False, orient='split' emits only the name and values.
        if orient == "split" and not self.index:
            obj = {"name": obj.name, "data": obj.values}
        return super()._write(
            obj,
            orient,
            double_precision,
            ensure_ascii,
            date_unit,
            iso_dates,
            default_handler,
            indent,
        )
class FrameWriter(Writer):
    """Writer for DataFrame objects; defaults to orient='columns'."""

    _default_orient = "columns"

    def _format_axes(self):
        """
        Try to format axes if they are datelike.
        """
        # Row labels become JSON object keys for these orients, so they
        # must be unique.
        if self.orient in ("index", "columns") and not self.obj.index.is_unique:
            raise ValueError(
                "DataFrame index must be unique for orient="
                "'{orient}'.".format(orient=self.orient)
            )
        # Likewise column labels become keys for these orients.
        if self.orient in ("index", "columns", "records") and not (
            self.obj.columns.is_unique
        ):
            raise ValueError(
                "DataFrame columns must be unique for orient="
                "'{orient}'.".format(orient=self.orient)
            )

    def _write(
        self,
        obj,
        orient: Optional[str],
        double_precision: int,
        ensure_ascii: bool,
        date_unit: str,
        iso_dates: bool,
        default_handler: Optional[Callable[[Any], JSONSerializable]],
        indent: int,
    ):
        # With index=False, orient='split' drops the index from the payload.
        if orient == "split" and not self.index:
            obj = obj.to_dict(orient="split")
            del obj["index"]
        return super()._write(
            obj,
            orient,
            double_precision,
            ensure_ascii,
            date_unit,
            iso_dates,
            default_handler,
            indent,
        )
class JSONTableWriter(FrameWriter):
    # Writer for orient='table': emits a Table Schema plus row records.
    _default_orient = "records"
    def __init__(
        self,
        obj,
        orient: Optional[str],
        date_format: str,
        double_precision: int,
        ensure_ascii: bool,
        date_unit: str,
        index: bool,
        default_handler: Optional[Callable[[Any], JSONSerializable]] = None,
        indent: int = 0,
    ):
        """
        Adds a `schema` attribute with the Table Schema, resets
        the index (can't do in caller, because the schema inference needs
        to know what the index is), forces orient to records, and forces
        date_format to 'iso'.
        """
        super().__init__(
            obj,
            orient,
            date_format,
            double_precision,
            ensure_ascii,
            date_unit,
            index,
            default_handler=default_handler,
            indent=indent,
        )
        # Table Schema mandates ISO 8601 dates; any other format is an error.
        if date_format != "iso":
            msg = (
                "Trying to write with `orient='table'` and "
                "`date_format='{fmt}'`. Table Schema requires dates "
                "to be formatted with `date_format='iso'`".format(fmt=date_format)
            )
            raise ValueError(msg)
        # Infer the schema before the index is reset below.
        self.schema = build_table_schema(obj, index=self.index)
        # NotImplemented on a column MultiIndex
        if obj.ndim == 2 and isinstance(obj.columns, MultiIndex):
            raise NotImplementedError("orient='table' is not supported for MultiIndex")
        # TODO: Do this timedelta properly in objToJSON.c See GH #15137
        # reset_index() below would collide if a column shares a name with
        # the index, so reject that up front.
        if (
            (obj.ndim == 1)
            and (obj.name in set(obj.index.names))
            or len(obj.columns & obj.index.names)
        ):
            msg = "Overlapping names between the index and columns"
            raise ValueError(msg)
        # Work on a copy: the conversions below mutate the data.
        obj = obj.copy()
        timedeltas = obj.select_dtypes(include=["timedelta"]).columns
        if len(timedeltas):
            # Serialize timedeltas as ISO 8601 duration strings.
            obj[timedeltas] = obj[timedeltas].applymap(lambda x: x.isoformat())
        # Convert PeriodIndex to datetimes before serializing
        if is_period_dtype(obj.index):
            obj.index = obj.index.to_timestamp()
        # exclude index from obj if index=False
        if not self.index:
            self.obj = obj.reset_index(drop=True)
        else:
            self.obj = obj.reset_index(drop=False)
        # Table Schema output is always ISO dates in 'records' orient.
        self.date_format = "iso"
        self.orient = "records"
        self.index = index
    def _write(
        self,
        obj,
        orient,
        double_precision,
        ensure_ascii,
        date_unit,
        iso_dates,
        default_handler,
        indent,
    ):
        # Wrap the data with its schema: {"schema": ..., "data": ...}.
        table_obj = OrderedDict((("schema", self.schema), ("data", obj)))
        serialized = super()._write(
            table_obj,
            orient,
            double_precision,
            ensure_ascii,
            date_unit,
            iso_dates,
            default_handler,
            indent,
        )
        return serialized
def read_json(
    path_or_buf=None,
    orient=None,
    typ="frame",
    dtype=None,
    convert_axes=None,
    convert_dates=True,
    keep_default_dates=True,
    numpy=False,
    precise_float=False,
    date_unit=None,
    encoding=None,
    lines=False,
    chunksize=None,
    compression="infer",
):
    """
    Convert a JSON string to pandas object.

    Parameters
    ----------
    path_or_buf : a valid JSON str, path object or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be:
        ``file://localhost/path/to/table.json``.
        If you want to pass in a path object, pandas accepts any
        ``os.PathLike``.
        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handler (e.g. via builtin ``open`` function)
        or ``StringIO``.
    orient : str
        Indication of expected JSON string format.
        Compatible JSON strings can be produced by ``to_json()`` with a
        corresponding orient value.
        The set of possible orients is:
        - ``'split'`` : dict like
          ``{index -> [index], columns -> [columns], data -> [values]}``
        - ``'records'`` : list like
          ``[{column -> value}, ... , {column -> value}]``
        - ``'index'`` : dict like ``{index -> {column -> value}}``
        - ``'columns'`` : dict like ``{column -> {index -> value}}``
        - ``'values'`` : just the values array
        The allowed and default values depend on the value
        of the `typ` parameter.
        * when ``typ == 'series'``,
          - allowed orients are ``{'split','records','index'}``
          - default is ``'index'``
          - The Series index must be unique for orient ``'index'``.
        * when ``typ == 'frame'``,
          - allowed orients are ``{'split','records','index',
            'columns','values', 'table'}``
          - default is ``'columns'``
          - The DataFrame index must be unique for orients ``'index'`` and
            ``'columns'``.
          - The DataFrame columns must be unique for orients ``'index'``,
            ``'columns'``, and ``'records'``.
        .. versionadded:: 0.23.0
           'table' as an allowed value for the ``orient`` argument
    typ : {'frame', 'series'}, default 'frame'
        The type of object to recover.
    dtype : bool or dict, default None
        If True, infer dtypes; if a dict of column to dtype, then use those;
        if False, then don't infer dtypes at all, applies only to the data.
        For all ``orient`` values except ``'table'``, default is True.
        .. versionchanged:: 0.25.0
           Not applicable for ``orient='table'``.
    convert_axes : bool, default None
        Try to convert the axes to the proper dtypes.
        For all ``orient`` values except ``'table'``, default is True.
        .. versionchanged:: 0.25.0
           Not applicable for ``orient='table'``.
    convert_dates : bool or list of str, default True
        List of columns to parse for dates. If True, then try to parse
        datelike columns. A column label is datelike if
        * it ends with ``'_at'``,
        * it ends with ``'_time'``,
        * it begins with ``'timestamp'``,
        * it is ``'modified'``, or
        * it is ``'date'``.
    keep_default_dates : bool, default True
        If parsing dates, then parse the default datelike columns.
    numpy : bool, default False
        Direct decoding to numpy arrays. Supports numeric data only, but
        non-numeric column and index labels are supported. Note also that the
        JSON ordering MUST be the same for each term if numpy=True.
    precise_float : bool, default False
        Set to enable usage of higher precision (strtod) function when
        decoding string to double values. Default (False) is to use fast but
        less precise builtin functionality.
    date_unit : str, default None
        The timestamp unit to detect if converting dates. The default behaviour
        is to try and detect the correct precision, but if this is not desired
        then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
        milliseconds, microseconds or nanoseconds respectively.
    encoding : str, default is 'utf-8'
        The encoding to use to decode py3 bytes.
    lines : bool, default False
        Read the file as a json object per line.
    chunksize : int, optional
        Return JsonReader object for iteration.
        See the `line-delimited json docs
        <http://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#line-delimited-json>`_
        for more information on ``chunksize``.
        This can only be passed if `lines=True`.
        If this is None, the file will be read into memory all at once.
        .. versionadded:: 0.21.0
    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
        For on-the-fly decompression of on-disk data. If 'infer', then use
        gzip, bz2, zip or xz if path_or_buf is a string ending in
        '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
        otherwise. If using 'zip', the ZIP file must contain only one data
        file to be read in. Set to None for no decompression.
        .. versionadded:: 0.21.0

    Returns
    -------
    Series or DataFrame
        The type returned depends on the value of `typ`.

    See Also
    --------
    DataFrame.to_json : Convert a DataFrame to a JSON string.
    Series.to_json : Convert a Series to a JSON string.

    Notes
    -----
    Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
    :class:`Index` name of `index` gets written with :func:`to_json`, the
    subsequent read operation will incorrectly set the :class:`Index` name to
    ``None``. This is because `index` is also used by :func:`DataFrame.to_json`
    to denote a missing :class:`Index` name, and the subsequent
    :func:`read_json` operation cannot distinguish between the two. The same
    limitation is encountered with a :class:`MultiIndex` and any names
    beginning with ``'level_'``.

    Examples
    --------
    >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
    ...                   index=['row 1', 'row 2'],
    ...                   columns=['col 1', 'col 2'])

    Encoding/decoding a Dataframe using ``'split'`` formatted JSON:

    >>> df.to_json(orient='split')
    '{"columns":["col 1","col 2"],
      "index":["row 1","row 2"],
      "data":[["a","b"],["c","d"]]}'
    >>> pd.read_json(_, orient='split')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

    >>> df.to_json(orient='index')
    '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
    >>> pd.read_json(_, orient='index')
          col 1 col 2
    row 1     a     b
    row 2     c     d

    Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
    Note that index labels are not preserved with this encoding.

    >>> df.to_json(orient='records')
    '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
    >>> pd.read_json(_, orient='records')
      col 1 col 2
    0     a     b
    1     c     d

    Encoding with Table Schema

    >>> df.to_json(orient='table')
    '{"schema": {"fields": [{"name": "index", "type": "string"},
                            {"name": "col 1", "type": "string"},
                            {"name": "col 2", "type": "string"}],
                 "primaryKey": "index",
                 "pandas_version": "0.20.0"},
     "data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
              {"index": "row 2", "col 1": "c", "col 2": "d"}]}'
    """
    if orient == "table":
        # A Table Schema payload carries its own dtype/axis information,
        # so these options are incompatible with it.
        if dtype:
            raise ValueError("cannot pass both dtype and orient='table'")
        if convert_axes:
            raise ValueError("cannot pass both convert_axes and orient='table'")
    else:
        # For every other orient both options default to enabled.
        if dtype is None:
            dtype = True
        if convert_axes is None:
            convert_axes = True
    if encoding is None:
        encoding = "utf-8"

    compression = _infer_compression(path_or_buf, compression)
    filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
        path_or_buf, encoding=encoding, compression=compression
    )

    reader = JsonReader(
        filepath_or_buffer,
        orient=orient,
        typ=typ,
        dtype=dtype,
        convert_axes=convert_axes,
        convert_dates=convert_dates,
        keep_default_dates=keep_default_dates,
        numpy=numpy,
        precise_float=precise_float,
        date_unit=date_unit,
        encoding=encoding,
        lines=lines,
        chunksize=chunksize,
        compression=compression,
    )
    if chunksize:
        # Defer reading: the caller iterates the reader chunk by chunk.
        return reader

    result = reader.read()
    if should_close:
        filepath_or_buffer.close()
    return result
class JsonReader(BaseIterator):
    """
    JsonReader provides an interface for reading in a JSON file.
    If initialized with ``lines=True`` and ``chunksize``, can be iterated over
    ``chunksize`` lines at a time. Otherwise, calling ``read`` reads in the
    whole document.
    """
    def __init__(
        self,
        filepath_or_buffer,
        orient,
        typ,
        dtype,
        convert_axes,
        convert_dates,
        keep_default_dates,
        numpy,
        precise_float,
        date_unit,
        encoding,
        lines,
        chunksize,
        compression,
    ):
        self.path_or_buf = filepath_or_buffer
        self.orient = orient
        self.typ = typ
        self.dtype = dtype
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.keep_default_dates = keep_default_dates
        self.numpy = numpy
        self.precise_float = precise_float
        self.date_unit = date_unit
        self.encoding = encoding
        self.compression = compression
        self.lines = lines
        self.chunksize = chunksize
        # Running row count so chunked reads get a contiguous index.
        self.nrows_seen = 0
        # Set to True in _get_data_from_filepath when we open a handle
        # ourselves (and are therefore responsible for closing it).
        self.should_close = False
        if self.chunksize is not None:
            # chunksize must be a positive integer.
            self.chunksize = _validate_integer("chunksize", self.chunksize, 1)
            if not self.lines:
                raise ValueError("chunksize can only be passed if lines=True")
        data = self._get_data_from_filepath(filepath_or_buffer)
        self.data = self._preprocess_data(data)
    def _preprocess_data(self, data):
        """
        At this point, the data either has a `read` attribute (e.g. a file
        object or a StringIO) or is a string that is a JSON document.
        If self.chunksize, we prepare the data for the `__next__` method.
        Otherwise, we read it into memory for the `read` method.
        """
        if hasattr(data, "read") and not self.chunksize:
            # Whole-document mode: slurp the stream now.
            data = data.read()
        if not hasattr(data, "read") and self.chunksize:
            # Chunked mode needs a line-iterable stream; wrap the string.
            data = StringIO(data)
        return data
    def _get_data_from_filepath(self, filepath_or_buffer):
        """
        The function read_json accepts three input types:
        1. filepath (string-like)
        2. file-like object (e.g. open file object, StringIO)
        3. JSON string
        This method turns (1) into (2) to simplify the rest of the processing.
        It returns input types (2) and (3) unchanged.
        """
        data = filepath_or_buffer
        exists = False
        if isinstance(data, str):
            try:
                exists = os.path.exists(filepath_or_buffer)
            # gh-5874: if the filepath is too long will raise here
            except (TypeError, ValueError):
                pass
        if exists or self.compression is not None:
            # We open the handle ourselves, so we must close it in close().
            data, _ = _get_handle(
                filepath_or_buffer,
                "r",
                encoding=self.encoding,
                compression=self.compression,
            )
            self.should_close = True
            self.open_stream = data
        return data
    def _combine_lines(self, lines):
        """
        Combines a list of JSON objects into one JSON object.
        """
        # Drop blank lines, then wrap everything as one JSON array.
        lines = filter(None, map(lambda x: x.strip(), lines))
        return "[" + ",".join(lines) + "]"
    def read(self):
        """
        Read the whole JSON input into a pandas object.
        """
        if self.lines and self.chunksize:
            # Chunked: concatenate the per-chunk objects from __next__.
            obj = concat(self)
        elif self.lines:
            # Line-delimited: join the lines into one JSON array first.
            data = ensure_str(self.data)
            obj = self._get_object_parser(self._combine_lines(data.split("\n")))
        else:
            obj = self._get_object_parser(self.data)
        self.close()
        return obj
    def _get_object_parser(self, json):
        """
        Parses a json document into a pandas object.
        """
        typ = self.typ
        dtype = self.dtype
        kwargs = {
            "orient": self.orient,
            "dtype": self.dtype,
            "convert_axes": self.convert_axes,
            "convert_dates": self.convert_dates,
            "keep_default_dates": self.keep_default_dates,
            "numpy": self.numpy,
            "precise_float": self.precise_float,
            "date_unit": self.date_unit,
        }
        obj = None
        if typ == "frame":
            obj = FrameParser(json, **kwargs).parse()
        if typ == "series" or obj is None:
            # Series fallback; a non-bool dtype is forwarded as-is.
            if not isinstance(dtype, bool):
                kwargs["dtype"] = dtype
            obj = SeriesParser(json, **kwargs).parse()
        return obj
    def close(self):
        """
        If we opened a stream earlier, in _get_data_from_filepath, we should
        close it.
        If an open stream or file was passed, we leave it open.
        """
        if self.should_close:
            try:
                self.open_stream.close()
            except (IOError, AttributeError):
                pass
    def __next__(self):
        # Pull the next chunk of lines off the stream.
        lines = list(islice(self.data, self.chunksize))
        if lines:
            lines_json = self._combine_lines(lines)
            obj = self._get_object_parser(lines_json)
            # Make sure that the returned objects have the right index.
            obj.index = range(self.nrows_seen, self.nrows_seen + len(obj))
            self.nrows_seen += len(obj)
            return obj
        self.close()
        raise StopIteration
class Parser:
    """Shared machinery for turning a JSON document into a pandas object."""

    _STAMP_UNITS = ("s", "ms", "us", "ns")
    # Smallest plausible epoch value per unit (one year past the epoch);
    # smaller numbers are treated as plain numbers, not timestamps.
    _MIN_STAMPS = {
        "s": 31536000,
        "ms": 31536000000,
        "us": 31536000000000,
        "ns": 31536000000000000,
    }

    def __init__(
        self,
        json,
        orient,
        dtype=None,
        convert_axes=True,
        convert_dates=True,
        keep_default_dates=False,
        numpy=False,
        precise_float=False,
        date_unit=None,
    ):
        self.json = json
        self.orient = self._default_orient if orient is None else orient
        self.dtype = dtype

        # The numpy fast path cannot handle orient='split'.
        if self.orient == "split":
            numpy = False

        if date_unit is None:
            # Autodetect precision; the loosest bound is the 's' unit.
            self.min_stamp = self._MIN_STAMPS["s"]
        else:
            date_unit = date_unit.lower()
            if date_unit not in self._STAMP_UNITS:
                raise ValueError(
                    "date_unit must be one of {units}".format(units=self._STAMP_UNITS)
                )
            self.min_stamp = self._MIN_STAMPS[date_unit]

        self.numpy = numpy
        self.precise_float = precise_float
        self.convert_axes = convert_axes
        self.convert_dates = convert_dates
        self.date_unit = date_unit
        self.keep_default_dates = keep_default_dates
        self.obj = None

    def check_keys_split(self, decoded):
        """
        Checks that dict has only the appropriate keys for orient='split'.
        """
        unexpected = set(decoded.keys()).difference(set(self._split_keys))
        if unexpected:
            raise ValueError(
                "JSON data had unexpected key(s): {bad_keys}".format(
                    bad_keys=pprint_thing(", ".join(unexpected))
                )
            )

    def parse(self):
        """Decode the document, convert axes and dtypes, return the result."""
        if self.numpy:
            self._parse_numpy()
        else:
            self._parse_no_numpy()
        if self.obj is None:
            return None
        if self.convert_axes:
            self._convert_axes()
        self._try_convert_types()
        return self.obj

    def _convert_axes(self):
        """
        Try to convert axes.
        """
        for axis_name in self.obj._AXIS_NUMBERS:
            converted, changed = self._try_convert_data(
                axis_name,
                self.obj._get_axis(axis_name),
                use_dtypes=False,
                convert_dates=True,
            )
            if changed:
                setattr(self.obj, axis_name, converted)

    def _try_convert_types(self):
        raise AbstractMethodError(self)

    def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True):
        """
        Try to parse a ndarray like into a column by inferring dtype.

        Returns ``(data, converted)`` where ``converted`` indicates whether
        any conversion succeeded.
        """
        if use_dtypes:
            if not self.dtype:
                # dtype=False: caller asked for no inference at all.
                return data, False
            if self.dtype is not True:
                # A concrete dtype (or per-column mapping) was requested.
                wanted = (
                    self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype
                )
                if wanted is not None:
                    try:
                        return data.astype(np.dtype(wanted)), True
                    except (TypeError, ValueError):
                        return data, False

        if convert_dates:
            as_dates, converted = self._try_convert_to_date(data)
            if converted:
                return as_dates, True

        converted = False
        if data.dtype == "object":
            # Strings / mixed values: try a float parse first.
            try:
                data = data.astype("float64")
                converted = True
            except (TypeError, ValueError):
                pass
        if data.dtype.kind == "f" and data.dtype != "float64":
            # Normalize narrower floats to float64.
            try:
                data = data.astype("float64")
                converted = True
            except (TypeError, ValueError):
                pass
        # don't coerce 0-len data
        if len(data) and (data.dtype == "float" or data.dtype == "object"):
            # Floats that are really integers: downcast when lossless.
            try:
                as_int = data.astype("int64")
                if (as_int == data).all():
                    data = as_int
                    converted = True
            except (TypeError, ValueError):
                pass
        if data.dtype == "int":
            # Normalize any other integer width to int64.
            try:
                data = data.astype("int64")
                converted = True
            except (TypeError, ValueError):
                pass
        return data, converted

    def _try_convert_to_date(self, data):
        """
        Try to parse a ndarray like into a date column.
        Try to coerce object in epoch/iso formats and integer/float in epoch
        formats. Return a boolean if parsing was successful.
        """
        if not len(data):
            # no conversion on empty
            return data, False

        candidate = data
        if candidate.dtype == "object":
            # Strings that are really epoch numbers.
            try:
                candidate = data.astype("int64")
            except (TypeError, ValueError, OverflowError):
                pass

        if issubclass(candidate.dtype.type, np.number):
            # Reject values outside the plausible timestamp range, but
            # keep NaT markers.
            in_range = (
                isna(candidate.values)
                | (candidate > self.min_stamp)
                | (candidate.values == iNaT)
            )
            if not in_range.all():
                return data, False

        date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
        for unit in date_units:
            try:
                return to_datetime(candidate, errors="raise", unit=unit), True
            except (ValueError, OverflowError):
                continue
        return data, False

    def _try_convert_dates(self):
        raise AbstractMethodError(self)
class SeriesParser(Parser):
    """Parse a JSON document into a Series."""

    _default_orient = "index"
    _split_keys = ("name", "index", "data")

    def _parse_no_numpy(self):
        """Decode with the standard parser, dispatching on orient."""
        if self.orient == "split":
            decoded = {
                str(k): v
                for k, v in loads(self.json, precise_float=self.precise_float).items()
            }
            self.check_keys_split(decoded)
            self.obj = Series(dtype=None, **decoded)
        else:
            self.obj = Series(
                loads(self.json, precise_float=self.precise_float), dtype=None
            )

    def _parse_numpy(self):
        """Decode using the numpy fast path, dispatching on orient."""
        numpy_kwargs = {
            "dtype": None,
            "numpy": True,
            "precise_float": self.precise_float,
        }
        if self.orient == "split":
            decoded = {
                str(k): v for k, v in loads(self.json, **numpy_kwargs).items()
            }
            self.check_keys_split(decoded)
            self.obj = Series(**decoded)
        elif self.orient in ("columns", "index"):
            # Labelled decoding returns (values, index) to unpack.
            self.obj = Series(*loads(self.json, labelled=True, **numpy_kwargs))
        else:
            self.obj = Series(loads(self.json, **numpy_kwargs))

    def _try_convert_types(self):
        """Run dtype inference over the whole Series."""
        if self.obj is None:
            return
        converted, changed = self._try_convert_data(
            "data", self.obj, convert_dates=self.convert_dates
        )
        if changed:
            self.obj = converted
class FrameParser(Parser):
    """Parse a JSON document into a DataFrame."""

    _default_orient = "columns"
    _split_keys = ("columns", "index", "data")

    def _parse_numpy(self):
        """Decode using the numpy fast path, dispatching on orient."""
        json = self.json
        orient = self.orient
        numpy_kwargs = {
            "dtype": None,
            "numpy": True,
            "precise_float": self.precise_float,
        }
        if orient == "columns":
            args = loads(json, labelled=True, **numpy_kwargs)
            if len(args):
                # loads returns (values, columns, index); transpose the
                # values and swap the labels back for the constructor.
                args = (args[0].T, args[2], args[1])
            self.obj = DataFrame(*args)
        elif orient == "split":
            decoded = {
                str(k): v for k, v in loads(json, **numpy_kwargs).items()
            }
            self.check_keys_split(decoded)
            self.obj = DataFrame(**decoded)
        elif orient == "values":
            self.obj = DataFrame(loads(json, **numpy_kwargs))
        else:
            self.obj = DataFrame(*loads(json, labelled=True, **numpy_kwargs))

    def _parse_no_numpy(self):
        """Decode with the standard parser, dispatching on orient."""
        json = self.json
        orient = self.orient
        if orient == "split":
            decoded = {
                str(k): v
                for k, v in loads(json, precise_float=self.precise_float).items()
            }
            self.check_keys_split(decoded)
            self.obj = DataFrame(dtype=None, **decoded)
        elif orient == "index":
            self.obj = DataFrame.from_dict(
                loads(json, precise_float=self.precise_float),
                dtype=None,
                orient="index",
            )
        elif orient == "table":
            self.obj = parse_table_schema(json, precise_float=self.precise_float)
        else:
            # 'columns' and any remaining orient share the default path.
            self.obj = DataFrame(
                loads(json, precise_float=self.precise_float), dtype=None
            )

    def _process_converter(self, f, filt=None):
        """Apply converter ``f`` column-wise, rebuilding the frame if needed."""
        if filt is None:
            filt = lambda col, c: True

        changed = False
        new_cols = {}
        for pos, (label, values) in enumerate(self.obj.items()):
            if filt(label, values):
                converted, ok = f(label, values)
                if ok:
                    values = converted
                    changed = True
            new_cols[pos] = values

        if changed:
            # Positional keys sidestep duplicate column labels; the
            # original labels are restored afterwards.
            rebuilt = DataFrame(new_cols, index=self.obj.index)
            rebuilt.columns = self.obj.columns
            self.obj = rebuilt

    def _try_convert_types(self):
        """Convert dates first, then run dtype inference on each column."""
        if self.obj is None:
            return
        if self.convert_dates:
            self._try_convert_dates()
        self._process_converter(
            lambda col, c: self._try_convert_data(col, c, convert_dates=False)
        )

    def _try_convert_dates(self):
        """Convert requested columns (or heuristically date-like ones)."""
        if self.obj is None:
            return

        # our columns to parse
        convert_dates = self.convert_dates
        if convert_dates is True:
            convert_dates = []
        convert_dates = set(convert_dates)

        def is_ok(col):
            """
            Return if this col is ok to try for a date parse.
            """
            if not isinstance(col, str):
                return False
            col_lower = col.lower()
            return (
                col_lower.endswith(("_at", "_time"))
                or col_lower.startswith("timestamp")
                or col_lower in ("modified", "date", "datetime")
            )

        self._process_converter(
            lambda col, c: self._try_convert_to_date(c),
            lambda col, c: (
                (self.keep_default_dates and is_ok(col)) or col in convert_dates
            ),
        )
| [
"pandas.io.formats.printing.pprint_thing",
"pandas.io.common.get_filepath_or_buffer",
"pandas.core.dtypes.common.ensure_str",
"pandas.io.common._stringify_path",
"pandas.io.common._get_handle",
"pandas.DataFrame",
"pandas.io.parsers._validate_integer",
"os.path.exists",
"pandas.isna",
"pandas.core... | [((1537, 1565), 'pandas.io.common._stringify_path', '_stringify_path', (['path_or_buf'], {}), '(path_or_buf)\n', (1552, 1565), False, 'from pandas.io.common import BaseIterator, _get_handle, _infer_compression, _stringify_path, get_filepath_or_buffer\n'), ((18064, 18108), 'pandas.io.common._infer_compression', '_infer_compression', (['path_or_buf', 'compression'], {}), '(path_or_buf, compression)\n', (18082, 18108), False, 'from pandas.io.common import BaseIterator, _get_handle, _infer_compression, _stringify_path, get_filepath_or_buffer\n'), ((18164, 18243), 'pandas.io.common.get_filepath_or_buffer', 'get_filepath_or_buffer', (['path_or_buf'], {'encoding': 'encoding', 'compression': 'compression'}), '(path_or_buf, encoding=encoding, compression=compression)\n', (18186, 18243), False, 'from pandas.io.common import BaseIterator, _get_handle, _infer_compression, _stringify_path, get_filepath_or_buffer\n'), ((2530, 2584), 'pandas.io.common._get_handle', '_get_handle', (['path_or_buf', '"""w"""'], {'compression': 'compression'}), "(path_or_buf, 'w', compression=compression)\n", (2541, 2584), False, 'from pandas.io.common import BaseIterator, _get_handle, _infer_compression, _stringify_path, get_filepath_or_buffer\n'), ((3585, 3610), 'pandas.errors.AbstractMethodError', 'AbstractMethodError', (['self'], {}), '(self)\n', (3604, 3610), False, 'from pandas.errors import AbstractMethodError\n'), ((8874, 8900), 'pandas.core.dtypes.common.is_period_dtype', 'is_period_dtype', (['obj.index'], {}), '(obj.index)\n', (8889, 8900), False, 'from pandas.core.dtypes.common import ensure_str, is_period_dtype\n'), ((9448, 9501), 'collections.OrderedDict', 'OrderedDict', (["(('schema', self.schema), ('data', obj))"], {}), "((('schema', self.schema), ('data', obj)))\n", (9459, 9501), False, 'from collections import OrderedDict\n'), ((26825, 26850), 'pandas.errors.AbstractMethodError', 'AbstractMethodError', (['self'], {}), '(self)\n', (26844, 26850), False, 'from 
pandas.errors import AbstractMethodError\n'), ((30342, 30367), 'pandas.errors.AbstractMethodError', 'AbstractMethodError', (['self'], {}), '(self)\n', (30361, 30367), False, 'from pandas.errors import AbstractMethodError\n'), ((20093, 20142), 'pandas.io.parsers._validate_integer', '_validate_integer', (['"""chunksize"""', 'self.chunksize', '(1)'], {}), "('chunksize', self.chunksize, 1)\n", (20110, 20142), False, 'from pandas.io.parsers import _validate_integer\n'), ((20878, 20892), 'io.StringIO', 'StringIO', (['data'], {}), '(data)\n', (20886, 20892), False, 'from io import StringIO\n'), ((21696, 21791), 'pandas.io.common._get_handle', '_get_handle', (['filepath_or_buffer', '"""r"""'], {'encoding': 'self.encoding', 'compression': 'self.compression'}), "(filepath_or_buffer, 'r', encoding=self.encoding, compression=\n self.compression)\n", (21707, 21791), False, 'from pandas.io.common import BaseIterator, _get_handle, _infer_compression, _stringify_path, get_filepath_or_buffer\n'), ((22350, 22362), 'pandas.core.reshape.concat.concat', 'concat', (['self'], {}), '(self)\n', (22356, 22362), False, 'from pandas.core.reshape.concat import concat\n'), ((23870, 23903), 'itertools.islice', 'islice', (['self.data', 'self.chunksize'], {}), '(self.data, self.chunksize)\n', (23876, 23903), False, 'from itertools import islice\n'), ((30801, 30830), 'pandas.Series', 'Series', ([], {'dtype': 'None'}), '(dtype=None, **decoded)\n', (30807, 30830), False, 'from pandas import DataFrame, MultiIndex, Series, isna, to_datetime\n'), ((31298, 31315), 'pandas.Series', 'Series', ([], {}), '(**decoded)\n', (31304, 31315), False, 'from pandas import DataFrame, MultiIndex, Series, isna, to_datetime\n'), ((32556, 32572), 'pandas.DataFrame', 'DataFrame', (['*args'], {}), '(*args)\n', (32565, 32572), False, 'from pandas import DataFrame, MultiIndex, Series, isna, to_datetime\n'), ((34920, 34960), 'pandas.DataFrame', 'DataFrame', (['new_obj'], {'index': 'self.obj.index'}), '(new_obj, 
index=self.obj.index)\n', (34929, 34960), False, 'from pandas import DataFrame, MultiIndex, Series, isna, to_datetime\n'), ((21455, 21489), 'os.path.exists', 'os.path.exists', (['filepath_or_buffer'], {}), '(filepath_or_buffer)\n', (21469, 21489), False, 'import os\n'), ((22407, 22428), 'pandas.core.dtypes.common.ensure_str', 'ensure_str', (['self.data'], {}), '(self.data)\n', (22417, 22428), False, 'from pandas.core.dtypes.common import ensure_str, is_period_dtype\n'), ((30105, 30158), 'pandas.to_datetime', 'to_datetime', (['new_data'], {'errors': '"""raise"""', 'unit': 'date_unit'}), "(new_data, errors='raise', unit=date_unit)\n", (30116, 30158), False, 'from pandas import DataFrame, MultiIndex, Series, isna, to_datetime\n'), ((32855, 32875), 'pandas.DataFrame', 'DataFrame', ([], {}), '(**decoded)\n', (32864, 32875), False, 'from pandas import DataFrame, MultiIndex, Series, isna, to_datetime\n'), ((33812, 33844), 'pandas.DataFrame', 'DataFrame', ([], {'dtype': 'None'}), '(dtype=None, **decoded)\n', (33821, 33844), False, 'from pandas import DataFrame, MultiIndex, Series, isna, to_datetime\n'), ((29747, 29768), 'pandas.isna', 'isna', (['new_data.values'], {}), '(new_data.values)\n', (29751, 29768), False, 'from pandas import DataFrame, MultiIndex, Series, isna, to_datetime\n'), ((26015, 26037), 'pandas.io.formats.printing.pprint_thing', 'pprint_thing', (['bad_keys'], {}), '(bad_keys)\n', (26027, 26037), False, 'from pandas.io.formats.printing import pprint_thing\n'), ((27512, 27527), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (27520, 27527), True, 'import numpy as np\n')] |
"""Functions to visualize matrices of data."""
import warnings
import matplotlib as mpl
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
try:
from scipy.cluster import hierarchy
_no_scipy = False
except ImportError:
_no_scipy = True
from . import cm
from .axisgrid import Grid
from .utils import (
despine,
axis_ticklabels_overlap,
relative_luminance,
to_utf8,
_draw_figure,
)
__all__ = ["heatmap", "clustermap"]
def _index_to_label(index):
"""Convert a pandas index or multiindex to an axis label."""
if isinstance(index, pd.MultiIndex):
return "-".join(map(to_utf8, index.names))
else:
return index.name
def _index_to_ticklabels(index):
"""Convert a pandas index or multiindex into ticklabels."""
if isinstance(index, pd.MultiIndex):
return ["-".join(map(to_utf8, i)) for i in index.values]
else:
return index.values
def _convert_colors(colors):
    """Convert either a list of colors or nested lists of colors to RGB."""
    to_rgb = mpl.colors.to_rgb
    try:
        # Probe the first element: if it converts, we have a flat list.
        to_rgb(colors[0])
        return [to_rgb(c) for c in colors]
    except ValueError:
        # Otherwise the input is a nested list of color vectors.
        return [[to_rgb(c) for c in inner] for inner in colors]
def _matrix_mask(data, mask):
"""Ensure that data and mask are compatible and add missing values.
Values will be plotted for cells where ``mask`` is ``False``.
``data`` is expected to be a DataFrame; ``mask`` can be an array or
a DataFrame.
"""
if mask is None:
mask = np.zeros(data.shape, bool)
if isinstance(mask, np.ndarray):
# For array masks, ensure that shape matches data then convert
if mask.shape != data.shape:
raise ValueError("Mask must have the same shape as data.")
mask = pd.DataFrame(mask,
index=data.index,
columns=data.columns,
dtype=bool)
elif isinstance(mask, pd.DataFrame):
# For DataFrame masks, ensure that semantic labels match data
if not mask.index.equals(data.index) \
and mask.columns.equals(data.columns):
err = "Mask must have the same index and columns as data."
raise ValueError(err)
# Add any cells with missing data to the mask
# This works around an issue where `plt.pcolormesh` doesn't represent
# missing data properly
mask = mask | pd.isnull(data)
return mask
class _HeatMapper:
    """Draw a heatmap plot of a matrix with nice labels and colormaps."""
    def __init__(self, data, vmin, vmax, cmap, center, robust, annot, fmt,
                 annot_kws, cbar, cbar_kws,
                 xticklabels=True, yticklabels=True, mask=None):
        """Initialize the plotting object."""
        # We always want to have a DataFrame with semantic information
        # and an ndarray to pass to matplotlib
        if isinstance(data, pd.DataFrame):
            plot_data = data.values
        else:
            plot_data = np.asarray(data)
            data = pd.DataFrame(plot_data)
        # Validate the mask and convert to DataFrame
        mask = _matrix_mask(data, mask)
        # Masked cells are dropped from color mapping and annotation below.
        plot_data = np.ma.masked_where(np.asarray(mask), plot_data)
        # Get good names for the rows and columns.
        # {x,y}ticklabels may be an int (label every i-th tick), a bool,
        # the string "auto", or an explicit list of labels.
        xtickevery = 1
        if isinstance(xticklabels, int):
            xtickevery = xticklabels
            xticklabels = _index_to_ticklabels(data.columns)
        elif xticklabels is True:
            xticklabels = _index_to_ticklabels(data.columns)
        elif xticklabels is False:
            xticklabels = []
        ytickevery = 1
        if isinstance(yticklabels, int):
            ytickevery = yticklabels
            yticklabels = _index_to_ticklabels(data.index)
        elif yticklabels is True:
            yticklabels = _index_to_ticklabels(data.index)
        elif yticklabels is False:
            yticklabels = []
        # "auto" defers tick selection to plot time (axes size is needed);
        # otherwise thin the labels now according to the every-i-th stride.
        if not len(xticklabels):
            self.xticks = []
            self.xticklabels = []
        elif isinstance(xticklabels, str) and xticklabels == "auto":
            self.xticks = "auto"
            self.xticklabels = _index_to_ticklabels(data.columns)
        else:
            self.xticks, self.xticklabels = self._skip_ticks(xticklabels,
                                                             xtickevery)
        if not len(yticklabels):
            self.yticks = []
            self.yticklabels = []
        elif isinstance(yticklabels, str) and yticklabels == "auto":
            self.yticks = "auto"
            self.yticklabels = _index_to_ticklabels(data.index)
        else:
            self.yticks, self.yticklabels = self._skip_ticks(yticklabels,
                                                             ytickevery)
        # Get good names for the axis labels
        xlabel = _index_to_label(data.columns)
        ylabel = _index_to_label(data.index)
        self.xlabel = xlabel if xlabel is not None else ""
        self.ylabel = ylabel if ylabel is not None else ""
        # Determine good default values for the colormapping
        self._determine_cmap_params(plot_data, vmin, vmax,
                                    cmap, center, robust)
        # Sort out the annotations: annot may be a bool (annotate with the
        # data itself) or an array of alternate annotation values.
        if annot is None or annot is False:
            annot = False
            annot_data = None
        else:
            if isinstance(annot, bool):
                annot_data = plot_data
            else:
                annot_data = np.asarray(annot)
                if annot_data.shape != plot_data.shape:
                    err = "`data` and `annot` must have same shape."
                    raise ValueError(err)
            annot = True
        # Save other attributes to the object
        self.data = data
        self.plot_data = plot_data
        self.annot = annot
        self.annot_data = annot_data
        self.fmt = fmt
        self.annot_kws = {} if annot_kws is None else annot_kws.copy()
        self.cbar = cbar
        self.cbar_kws = {} if cbar_kws is None else cbar_kws.copy()
    def _determine_cmap_params(self, plot_data, vmin, vmax,
                               cmap, center, robust):
        """Use some heuristics to set good defaults for colorbar and range."""
        # plot_data is a np.ma.array instance
        calc_data = plot_data.astype(float).filled(np.nan)
        # Robust limits use the 2nd/98th percentiles instead of the extremes.
        if vmin is None:
            if robust:
                vmin = np.nanpercentile(calc_data, 2)
            else:
                vmin = np.nanmin(calc_data)
        if vmax is None:
            if robust:
                vmax = np.nanpercentile(calc_data, 98)
            else:
                vmax = np.nanmax(calc_data)
        self.vmin, self.vmax = vmin, vmax
        # Choose default colormaps if not provided
        if cmap is None:
            if center is None:
                self.cmap = cm.rocket
            else:
                self.cmap = cm.icefire
        elif isinstance(cmap, str):
            self.cmap = mpl.cm.get_cmap(cmap)
        elif isinstance(cmap, list):
            self.cmap = mpl.colors.ListedColormap(cmap)
        else:
            self.cmap = cmap
        # Recenter a divergent colormap
        if center is not None:
            # Copy bad values
            # in mpl<3.2 only masked values are honored with "bad" color spec
            # (see https://github.com/matplotlib/matplotlib/pull/14257)
            bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]
            # under/over values are set for sure when cmap extremes
            # do not map to the same color as +-inf
            under = self.cmap(-np.inf)
            over = self.cmap(np.inf)
            under_set = under != self.cmap(0)
            over_set = over != self.cmap(self.cmap.N - 1)
            # Resample the colormap symmetrically around `center` so that the
            # center value maps to the middle of the colormap.
            vrange = max(vmax - center, center - vmin)
            normlize = mpl.colors.Normalize(center - vrange, center + vrange)
            cmin, cmax = normlize([vmin, vmax])
            cc = np.linspace(cmin, cmax, 256)
            self.cmap = mpl.colors.ListedColormap(self.cmap(cc))
            self.cmap.set_bad(bad)
            if under_set:
                self.cmap.set_under(under)
            if over_set:
                self.cmap.set_over(over)
    def _annotate_heatmap(self, ax, mesh):
        """Add textual labels with the value in each cell."""
        mesh.update_scalarmappable()
        height, width = self.annot_data.shape
        # Cell centers are at half-integer coordinates.
        xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)
        for x, y, m, color, val in zip(xpos.flat, ypos.flat,
                                       mesh.get_array(), mesh.get_facecolors(),
                                       self.annot_data.flat):
            if m is not np.ma.masked:
                # Pick a dark or light text color based on cell luminance
                # so the annotation stays readable.
                lum = relative_luminance(color)
                text_color = ".15" if lum > .408 else "w"
                annotation = ("{:" + self.fmt + "}").format(val)
                text_kwargs = dict(color=text_color, ha="center", va="center")
                text_kwargs.update(self.annot_kws)
                ax.text(x, y, annotation, **text_kwargs)
    def _skip_ticks(self, labels, tickevery):
        """Return ticks and labels at evenly spaced intervals."""
        n = len(labels)
        if tickevery == 0:
            ticks, labels = [], []
        elif tickevery == 1:
            # Ticks sit at cell centers (half-integer positions).
            ticks, labels = np.arange(n) + .5, labels
        else:
            start, end, step = 0, n, tickevery
            ticks = np.arange(start, end, step) + .5
            labels = labels[start:end:step]
        return ticks, labels
    def _auto_ticks(self, ax, labels, axis):
        """Determine ticks and ticklabels that minimize overlap."""
        transform = ax.figure.dpi_scale_trans.inverted()
        bbox = ax.get_window_extent().transformed(transform)
        size = [bbox.width, bbox.height][axis]
        axis = [ax.xaxis, ax.yaxis][axis]
        tick, = axis.set_ticks([0])
        fontsize = tick.label1.get_size()
        # Fontsize is in points; 72 points per inch of axes extent.
        max_ticks = int(size // (fontsize / 72))
        if max_ticks < 1:
            return [], []
        tick_every = len(labels) // max_ticks + 1
        tick_every = 1 if tick_every == 0 else tick_every
        ticks, labels = self._skip_ticks(labels, tick_every)
        return ticks, labels
    def plot(self, ax, cax, kws):
        """Draw the heatmap on the provided Axes."""
        # Remove all the Axes spines
        despine(ax=ax, left=True, bottom=True)
        # setting vmin/vmax in addition to norm is deprecated
        # so avoid setting if norm is set
        if "norm" not in kws:
            kws.setdefault("vmin", self.vmin)
            kws.setdefault("vmax", self.vmax)
        # Draw the heatmap; the mesh is reused below for annotation colors.
        mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)
        # Set the axis limits
        ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))
        # Invert the y axis to show the plot in matrix form
        ax.invert_yaxis()
        # Possibly add a colorbar
        if self.cbar:
            cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)
            cb.outline.set_linewidth(0)
            # If rasterized is passed to pcolormesh, also rasterize the
            # colorbar to avoid white lines on the PDF rendering
            if kws.get('rasterized', False):
                cb.solids.set_rasterized(True)
        # Add row and column labels
        if isinstance(self.xticks, str) and self.xticks == "auto":
            xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)
        else:
            xticks, xticklabels = self.xticks, self.xticklabels
        if isinstance(self.yticks, str) and self.yticks == "auto":
            yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)
        else:
            yticks, yticklabels = self.yticks, self.yticklabels
        ax.set(xticks=xticks, yticks=yticks)
        xtl = ax.set_xticklabels(xticklabels)
        ytl = ax.set_yticklabels(yticklabels, rotation="vertical")
        plt.setp(ytl, va="center")  # GH2484
        # Possibly rotate them if they overlap
        _draw_figure(ax.figure)
        if axis_ticklabels_overlap(xtl):
            plt.setp(xtl, rotation="vertical")
        if axis_ticklabels_overlap(ytl):
            plt.setp(ytl, rotation="horizontal")
        # Add the axis labels
        ax.set(xlabel=self.xlabel, ylabel=self.ylabel)
        # Annotate the cells with the formatted values
        if self.annot:
            self._annotate_heatmap(ax, mesh)
def heatmap(
    data, *,
    vmin=None, vmax=None, cmap=None, center=None, robust=False,
    annot=None, fmt=".2g", annot_kws=None,
    linewidths=0, linecolor="white",
    cbar=True, cbar_kws=None, cbar_ax=None,
    square=False, xticklabels="auto", yticklabels="auto",
    mask=None, ax=None,
    **kwargs
):
    """Plot rectangular data as a color-encoded matrix.
    This is an Axes-level function and will draw the heatmap into the
    currently-active Axes if none is provided to the ``ax`` argument. Part of
    this Axes space will be taken and used to plot a colormap, unless ``cbar``
    is False or a separate Axes is provided to ``cbar_ax``.
    Parameters
    ----------
    data : rectangular dataset
        2D dataset that can be coerced into an ndarray. If a Pandas DataFrame
        is provided, the index/column information will be used to label the
        columns and rows.
    vmin, vmax : floats, optional
        Values to anchor the colormap, otherwise they are inferred from the
        data and other keyword arguments.
    cmap : matplotlib colormap name or object, or list of colors, optional
        The mapping from data values to color space. If not provided, the
        default will depend on whether ``center`` is set.
    center : float, optional
        The value at which to center the colormap when plotting divergent data.
        Using this parameter will change the default ``cmap`` if none is
        specified.
    robust : bool, optional
        If True and ``vmin`` or ``vmax`` are absent, the colormap range is
        computed with robust quantiles instead of the extreme values.
    annot : bool or rectangular dataset, optional
        If True, write the data value in each cell. If an array-like with the
        same shape as ``data``, then use this to annotate the heatmap instead
        of the data. Note that DataFrames will match on position, not index.
    fmt : str, optional
        String formatting code to use when adding annotations.
    annot_kws : dict of key, value mappings, optional
        Keyword arguments for :meth:`matplotlib.axes.Axes.text` when ``annot``
        is True.
    linewidths : float, optional
        Width of the lines that will divide each cell.
    linecolor : color, optional
        Color of the lines that will divide each cell.
    cbar : bool, optional
        Whether to draw a colorbar.
    cbar_kws : dict of key, value mappings, optional
        Keyword arguments for :meth:`matplotlib.figure.Figure.colorbar`.
    cbar_ax : matplotlib Axes, optional
        Axes in which to draw the colorbar, otherwise take space from the
        main Axes.
    square : bool, optional
        If True, set the Axes aspect to "equal" so each cell will be
        square-shaped.
    xticklabels, yticklabels : "auto", bool, list-like, or int, optional
        If True, plot the column names of the dataframe. If False, don't plot
        the column names. If list-like, plot these alternate labels as the
        xticklabels. If an integer, use the column names but plot only every
        n label. If "auto", try to densely plot non-overlapping labels.
    mask : bool array or DataFrame, optional
        If passed, data will not be shown in cells where ``mask`` is True.
        Cells with missing values are automatically masked.
    ax : matplotlib Axes, optional
        Axes in which to draw the plot, otherwise use the currently-active
        Axes.
    kwargs : other keyword arguments
        All other keyword arguments are passed to
        :meth:`matplotlib.axes.Axes.pcolormesh`.
    Returns
    -------
    ax : matplotlib Axes
        Axes object with the heatmap.
    See Also
    --------
    clustermap : Plot a matrix using hierarchical clustering to arrange the
                 rows and columns.
    Examples
    --------
    Plot a heatmap for a numpy array:
    .. plot::
        :context: close-figs
        >>> import numpy as np; np.random.seed(0)
        >>> import seaborn as sns; sns.set_theme()
        >>> uniform_data = np.random.rand(10, 12)
        >>> ax = sns.heatmap(uniform_data)
    Change the limits of the colormap:
    .. plot::
        :context: close-figs
        >>> ax = sns.heatmap(uniform_data, vmin=0, vmax=1)
    Plot a heatmap for data centered on 0 with a diverging colormap:
    .. plot::
        :context: close-figs
        >>> normal_data = np.random.randn(10, 12)
        >>> ax = sns.heatmap(normal_data, center=0)
    Plot a dataframe with meaningful row and column labels:
    .. plot::
        :context: close-figs
        >>> flights = sns.load_dataset("flights")
        >>> flights = flights.pivot("month", "year", "passengers")
        >>> ax = sns.heatmap(flights)
    Annotate each cell with the numeric value using integer formatting:
    .. plot::
        :context: close-figs
        >>> ax = sns.heatmap(flights, annot=True, fmt="d")
    Add lines between each cell:
    .. plot::
        :context: close-figs
        >>> ax = sns.heatmap(flights, linewidths=.5)
    Use a different colormap:
    .. plot::
        :context: close-figs
        >>> ax = sns.heatmap(flights, cmap="YlGnBu")
    Center the colormap at a specific value:
    .. plot::
        :context: close-figs
        >>> ax = sns.heatmap(flights, center=flights.loc["Jan", 1955])
    Plot every other column label and don't plot row labels:
    .. plot::
        :context: close-figs
        >>> data = np.random.randn(50, 20)
        >>> ax = sns.heatmap(data, xticklabels=2, yticklabels=False)
    Don't draw a colorbar:
    .. plot::
        :context: close-figs
        >>> ax = sns.heatmap(flights, cbar=False)
    Use different axes for the colorbar:
    .. plot::
        :context: close-figs
        >>> grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
        >>> f, (ax, cbar_ax) = plt.subplots(2, gridspec_kw=grid_kws)
        >>> ax = sns.heatmap(flights, ax=ax,
        ...                  cbar_ax=cbar_ax,
        ...                  cbar_kws={"orientation": "horizontal"})
    Use a mask to plot only part of a matrix
    .. plot::
        :context: close-figs
        >>> corr = np.corrcoef(np.random.randn(10, 200))
        >>> mask = np.zeros_like(corr)
        >>> mask[np.triu_indices_from(mask)] = True
        >>> with sns.axes_style("white"):
        ...     f, ax = plt.subplots(figsize=(7, 5))
        ...     ax = sns.heatmap(corr, mask=mask, vmax=.3, square=True)
    """
    # The cell-divider settings travel with the pcolormesh keyword args.
    kwargs.update(linewidths=linewidths, edgecolor=linecolor)
    # The plotter handles data validation, masking, and color mapping.
    plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,
                          annot_kws, cbar, cbar_kws, xticklabels,
                          yticklabels, mask)
    # Draw into the provided Axes (or the current one) and return it.
    if ax is None:
        ax = plt.gca()
    if square:
        ax.set_aspect("equal")
    plotter.plot(ax, cbar_ax, kwargs)
    return ax
class _DendrogramPlotter:
"""Object for drawing tree of similarities between data rows/columns"""
def __init__(self, data, linkage, metric, method, axis, label, rotate):
"""Plot a dendrogram of the relationships between the columns of data
Parameters
----------
data : pandas.DataFrame
Rectangular data
"""
self.axis = axis
if self.axis == 1:
data = data.T
if isinstance(data, pd.DataFrame):
array = data.values
else:
array = np.asarray(data)
data = pd.DataFrame(array)
self.array = array
self.data = data
self.shape = self.data.shape
self.metric = metric
self.method = method
self.axis = axis
self.label = label
self.rotate = rotate
if linkage is None:
self.linkage = self.calculated_linkage
else:
self.linkage = linkage
self.dendrogram = self.calculate_dendrogram()
# Dendrogram ends are always at multiples of 5, who knows why
ticks = 10 * np.arange(self.data.shape[0]) + 5
if self.label:
ticklabels = _index_to_ticklabels(self.data.index)
ticklabels = [ticklabels[i] for i in self.reordered_ind]
if self.rotate:
self.xticks = []
self.yticks = ticks
self.xticklabels = []
self.yticklabels = ticklabels
self.ylabel = _index_to_label(self.data.index)
self.xlabel = ''
else:
self.xticks = ticks
self.yticks = []
self.xticklabels = ticklabels
self.yticklabels = []
self.ylabel = ''
self.xlabel = _index_to_label(self.data.index)
else:
self.xticks, self.yticks = [], []
self.yticklabels, self.xticklabels = [], []
self.xlabel, self.ylabel = '', ''
self.dependent_coord = self.dendrogram['dcoord']
self.independent_coord = self.dendrogram['icoord']
def _calculate_linkage_scipy(self):
linkage = hierarchy.linkage(self.array, method=self.method,
metric=self.metric)
return linkage
def _calculate_linkage_fastcluster(self):
import fastcluster
# Fastcluster has a memory-saving vectorized version, but only
# with certain linkage methods, and mostly with euclidean metric
# vector_methods = ('single', 'centroid', 'median', 'ward')
euclidean_methods = ('centroid', 'median', 'ward')
euclidean = self.metric == 'euclidean' and self.method in \
euclidean_methods
if euclidean or self.method == 'single':
return fastcluster.linkage_vector(self.array,
method=self.method,
metric=self.metric)
else:
linkage = fastcluster.linkage(self.array, method=self.method,
metric=self.metric)
return linkage
@property
def calculated_linkage(self):
try:
return self._calculate_linkage_fastcluster()
except ImportError:
if np.product(self.shape) >= 10000:
msg = ("Clustering large matrix with scipy. Installing "
"`fastcluster` may give better performance.")
warnings.warn(msg)
return self._calculate_linkage_scipy()
def calculate_dendrogram(self):
"""Calculates a dendrogram based on the linkage matrix
Made a separate function, not a property because don't want to
recalculate the dendrogram every time it is accessed.
Returns
-------
dendrogram : dict
Dendrogram dictionary as returned by scipy.cluster.hierarchy
.dendrogram. The important key-value pairing is
"reordered_ind" which indicates the re-ordering of the matrix
"""
return hierarchy.dendrogram(self.linkage, no_plot=True,
color_threshold=-np.inf)
@property
def reordered_ind(self):
"""Indices of the matrix, reordered by the dendrogram"""
return self.dendrogram['leaves']
def plot(self, ax, tree_kws):
"""Plots a dendrogram of the similarities between data on the axes
Parameters
----------
ax : matplotlib.axes.Axes
Axes object upon which the dendrogram is plotted
"""
tree_kws = {} if tree_kws is None else tree_kws.copy()
tree_kws.setdefault("linewidths", .5)
tree_kws.setdefault("colors", tree_kws.pop("color", (.2, .2, .2)))
if self.rotate and self.axis == 0:
coords = zip(self.dependent_coord, self.independent_coord)
else:
coords = zip(self.independent_coord, self.dependent_coord)
lines = LineCollection([list(zip(x, y)) for x, y in coords],
**tree_kws)
ax.add_collection(lines)
number_of_leaves = len(self.reordered_ind)
max_dependent_coord = max(map(max, self.dependent_coord))
if self.rotate:
ax.yaxis.set_ticks_position('right')
# Constants 10 and 1.05 come from
# `scipy.cluster.hierarchy._plot_dendrogram`
ax.set_ylim(0, number_of_leaves * 10)
ax.set_xlim(0, max_dependent_coord * 1.05)
ax.invert_xaxis()
ax.invert_yaxis()
else:
# Constants 10 and 1.05 come from
# `scipy.cluster.hierarchy._plot_dendrogram`
ax.set_xlim(0, number_of_leaves * 10)
ax.set_ylim(0, max_dependent_coord * 1.05)
despine(ax=ax, bottom=True, left=True)
ax.set(xticks=self.xticks, yticks=self.yticks,
xlabel=self.xlabel, ylabel=self.ylabel)
xtl = ax.set_xticklabels(self.xticklabels)
ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')
# Force a draw of the plot to avoid matplotlib window error
_draw_figure(ax.figure)
if len(ytl) > 0 and axis_ticklabels_overlap(ytl):
plt.setp(ytl, rotation="horizontal")
if len(xtl) > 0 and axis_ticklabels_overlap(xtl):
plt.setp(xtl, rotation="vertical")
return self
def dendrogram(
    data, *,
    linkage=None, axis=1, label=True, metric='euclidean',
    method='average', rotate=False, tree_kws=None, ax=None
):
    """Draw a tree diagram of relationships within a matrix

    Parameters
    ----------
    data : pandas.DataFrame
        Rectangular data
    linkage : numpy.array, optional
        Precomputed linkage matrix; computed from the data when omitted.
    axis : int, optional
        Axis to cluster over: 0 for rows, 1 for columns.
    label : bool, optional
        If True, label the dendrogram leaves with row/column names.
    metric : str, optional
        Distance metric; any value accepted by scipy.spatial.distance.pdist.
    method : str, optional
        Linkage method; any value accepted by scipy.cluster.hierarchy.linkage.
    rotate : bool, optional
        Rotate the plot 90 degrees counter-clockwise so leaves face right.
    tree_kws : dict, optional
        Keyword arguments for the matplotlib.collections.LineCollection
        used to draw the tree lines.
    ax : matplotlib axis, optional
        Axis to plot on; defaults to the current axis.

    Returns
    -------
    dendrogramplotter : _DendrogramPlotter
        A Dendrogram plotter object; the reordered indices are available
        as ``dendrogramplotter.reordered_ind``.
    """
    if _no_scipy:
        raise RuntimeError("dendrogram requires scipy to be installed")
    plotter = _DendrogramPlotter(
        data, linkage=linkage, axis=axis, metric=metric,
        method=method, label=label, rotate=rotate,
    )
    target_ax = plt.gca() if ax is None else ax
    return plotter.plot(ax=target_ax, tree_kws=tree_kws)
class ClusterGrid(Grid):
    def __init__(self, data, pivot_kws=None, z_score=None, standard_scale=None,
                 figsize=None, row_colors=None, col_colors=None, mask=None,
                 dendrogram_ratio=None, colors_ratio=None, cbar_pos=None):
        """Grid object for organizing clustered heatmap input on to axes"""
        if _no_scipy:
            raise RuntimeError("ClusterGrid requires scipy to be available")
        if isinstance(data, pd.DataFrame):
            self.data = data
        else:
            self.data = pd.DataFrame(data)
        # Pivot and/or normalize the raw input into the 2d matrix to plot.
        self.data2d = self.format_data(self.data, pivot_kws, z_score,
                                       standard_scale)
        self.mask = _matrix_mask(self.data2d, mask)
        self._figure = plt.figure(figsize=figsize)
        self.row_colors, self.row_color_labels = \
            self._preprocess_colors(data, row_colors, axis=0)
        self.col_colors, self.col_color_labels = \
            self._preprocess_colors(data, col_colors, axis=1)
        # The ratios may be given as a (row, col) pair or as a single scalar
        # that applies to both; unpacking a scalar raises TypeError.
        try:
            row_dendrogram_ratio, col_dendrogram_ratio = dendrogram_ratio
        except TypeError:
            row_dendrogram_ratio = col_dendrogram_ratio = dendrogram_ratio
        try:
            row_colors_ratio, col_colors_ratio = colors_ratio
        except TypeError:
            row_colors_ratio = col_colors_ratio = colors_ratio
        width_ratios = self.dim_ratios(self.row_colors,
                                       row_dendrogram_ratio,
                                       row_colors_ratio)
        height_ratios = self.dim_ratios(self.col_colors,
                                        col_dendrogram_ratio,
                                        col_colors_ratio)
        # Side-color annotations add an extra row/column to the grid.
        nrows = 2 if self.col_colors is None else 3
        ncols = 2 if self.row_colors is None else 3
        self.gs = gridspec.GridSpec(nrows, ncols,
                                    width_ratios=width_ratios,
                                    height_ratios=height_ratios)
        # Dendrograms sit along the left (rows) and top (columns) edges.
        self.ax_row_dendrogram = self._figure.add_subplot(self.gs[-1, 0])
        self.ax_col_dendrogram = self._figure.add_subplot(self.gs[0, -1])
        self.ax_row_dendrogram.set_axis_off()
        self.ax_col_dendrogram.set_axis_off()
        self.ax_row_colors = None
        self.ax_col_colors = None
        if self.row_colors is not None:
            self.ax_row_colors = self._figure.add_subplot(
                self.gs[-1, 1])
        if self.col_colors is not None:
            self.ax_col_colors = self._figure.add_subplot(
                self.gs[1, -1])
        self.ax_heatmap = self._figure.add_subplot(self.gs[-1, -1])
        if cbar_pos is None:
            self.ax_cbar = self.cax = None
        else:
            # Initialize the colorbar axes in the gridspec so that tight_layout
            # works. We will move it where it belongs later. This is a hack.
            self.ax_cbar = self._figure.add_subplot(self.gs[0, 0])
            self.cax = self.ax_cbar  # Backwards compatibility
        self.cbar_pos = cbar_pos
        self.dendrogram_row = None
        self.dendrogram_col = None
    def _preprocess_colors(self, data, colors, axis):
        """Preprocess {row/col}_colors to extract labels and convert colors."""
        labels = None
        if colors is not None:
            if isinstance(colors, (pd.DataFrame, pd.Series)):
                # If data is unindexed, raise
                if (not hasattr(data, "index") and axis == 0) or (
                    not hasattr(data, "columns") and axis == 1
                ):
                    axis_name = "col" if axis else "row"
                    msg = (f"{axis_name}_colors indices can't be matched with data "
                           f"indices. Provide {axis_name}_colors as a non-indexed "
                           "datatype, e.g. by using `.to_numpy()``")
                    raise TypeError(msg)
                # Ensure colors match data indices
                if axis == 0:
                    colors = colors.reindex(data.index)
                else:
                    colors = colors.reindex(data.columns)
                # Replace na's with white color
                # TODO We should set these to transparent instead
                colors = colors.astype(object).fillna('white')
                # Extract color values and labels from frame/series:
                # a DataFrame contributes one color band per column, a
                # Series contributes a single band named after itself.
                if isinstance(colors, pd.DataFrame):
                    labels = list(colors.columns)
                    colors = colors.T.values
                else:
                    if colors.name is None:
                        labels = [""]
                    else:
                        labels = [colors.name]
                    colors = colors.values
            # Normalize every color spec to RGB tuples.
            colors = _convert_colors(colors)
        return colors, labels
def format_data(self, data, pivot_kws, z_score=None,
standard_scale=None):
"""Extract variables from data or use directly."""
# Either the data is already in 2d matrix format, or need to do a pivot
if pivot_kws is not None:
data2d = data.pivot(**pivot_kws)
else:
data2d = data
if z_score is not None and standard_scale is not None:
raise ValueError(
'Cannot perform both z-scoring and standard-scaling on data')
if z_score is not None:
data2d = self.z_score(data2d, z_score)
if standard_scale is not None:
data2d = self.standard_scale(data2d, standard_scale)
return data2d
@staticmethod
def z_score(data2d, axis=1):
"""Standarize the mean and variance of the data axis
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
normalized : pandas.DataFrame
Noramlized data with a mean of 0 and variance of 1 across the
specified axis.
"""
if axis == 1:
z_scored = data2d
else:
z_scored = data2d.T
z_scored = (z_scored - z_scored.mean()) / z_scored.std()
if axis == 1:
return z_scored
else:
return z_scored.T
@staticmethod
def standard_scale(data2d, axis=1):
"""Divide the data by the difference between the max and min
Parameters
----------
data2d : pandas.DataFrame
Data to normalize
axis : int
Which axis to normalize across. If 0, normalize across rows, if 1,
normalize across columns.
Returns
-------
standardized : pandas.DataFrame
Noramlized data with a mean of 0 and variance of 1 across the
specified axis.
"""
# Normalize these values to range from 0 to 1
if axis == 1:
standardized = data2d
else:
standardized = data2d.T
subtract = standardized.min()
standardized = (standardized - subtract) / (
standardized.max() - standardized.min())
if axis == 1:
return standardized
else:
return standardized.T
def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):
"""Get the proportions of the figure taken up by each axes."""
ratios = [dendrogram_ratio]
if colors is not None:
# Colors are encoded as rgb, so there is an extra dimension
if np.ndim(colors) > 2:
n_colors = len(colors)
else:
n_colors = 1
ratios += [n_colors * colors_ratio]
# Add the ratio for the heatmap itself
ratios.append(1 - sum(ratios))
return ratios
@staticmethod
def color_list_to_matrix_and_cmap(colors, ind, axis=0):
"""Turns a list of colors into a numpy matrix and matplotlib colormap
These arguments can now be plotted using heatmap(matrix, cmap)
and the provided colors will be plotted.
Parameters
----------
colors : list of matplotlib colors
Colors to label the rows or columns of a dataframe.
ind : list of ints
Ordering of the rows or columns, to reorder the original colors
by the clustered dendrogram order
axis : int
Which axis this is labeling
Returns
-------
matrix : numpy.array
A numpy array of integer values, where each indexes into the cmap
cmap : matplotlib.colors.ListedColormap
"""
try:
mpl.colors.to_rgb(colors[0])
except ValueError:
# We have a 2D color structure
m, n = len(colors), len(colors[0])
if not all(len(c) == n for c in colors[1:]):
raise ValueError("Multiple side color vectors must have same size")
else:
# We have one vector of colors
m, n = 1, len(colors)
colors = [colors]
# Map from unique colors to colormap index value
unique_colors = {}
matrix = np.zeros((m, n), int)
for i, inner in enumerate(colors):
for j, color in enumerate(inner):
idx = unique_colors.setdefault(color, len(unique_colors))
matrix[i, j] = idx
# Reorder for clustering and transpose for axis
matrix = matrix[:, ind]
if axis == 0:
matrix = matrix.T
cmap = mpl.colors.ListedColormap(list(unique_colors))
return matrix, cmap
    def plot_dendrograms(self, row_cluster, col_cluster, metric, method,
                         row_linkage, col_linkage, tree_kws):
        """Draw the row/column dendrograms, or blank their axes when the
        corresponding clustering is disabled. Stores the plotter objects
        on self.dendrogram_{row,col} so the leaf order can be reused."""
        # Plot the row dendrogram (rotated so leaves face the heatmap)
        if row_cluster:
            self.dendrogram_row = dendrogram(
                self.data2d, metric=metric, method=method, label=False, axis=0,
                ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,
                tree_kws=tree_kws
            )
        else:
            self.ax_row_dendrogram.set_xticks([])
            self.ax_row_dendrogram.set_yticks([])
        # PLot the column dendrogram
        if col_cluster:
            self.dendrogram_col = dendrogram(
                self.data2d, metric=metric, method=method, label=False,
                axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,
                tree_kws=tree_kws
            )
        else:
            self.ax_col_dendrogram.set_xticks([])
            self.ax_col_dendrogram.set_yticks([])
        despine(ax=self.ax_row_dendrogram, bottom=True, left=True)
        despine(ax=self.ax_col_dendrogram, bottom=True, left=True)
    def plot_colors(self, xind, yind, **kws):
        """Plots color labels between the dendrogram and the heatmap

        Parameters
        ----------
        heatmap_kws : dict
            Keyword arguments heatmap
        """
        # xind/yind are the clustered column/row orderings used to reorder
        # the side-color bands to match the heatmap.
        # Remove any custom colormap and centering
        # TODO this code has consistently caused problems when we
        # have missed kwargs that need to be excluded that it might
        # be better to rewrite *in*clusively.
        kws = kws.copy()
        kws.pop('cmap', None)
        kws.pop('norm', None)
        kws.pop('center', None)
        kws.pop('annot', None)
        kws.pop('vmin', None)
        kws.pop('vmax', None)
        kws.pop('robust', None)
        kws.pop('xticklabels', None)
        kws.pop('yticklabels', None)
        # Plot the row colors
        if self.row_colors is not None:
            matrix, cmap = self.color_list_to_matrix_and_cmap(
                self.row_colors, yind, axis=0)
            # Get row_color labels
            if self.row_color_labels is not None:
                row_color_labels = self.row_color_labels
            else:
                row_color_labels = False
            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,
                    xticklabels=row_color_labels, yticklabels=False, **kws)
            # Adjust rotation of labels
            if row_color_labels is not False:
                plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)
        else:
            despine(self.ax_row_colors, left=True, bottom=True)
        # Plot the column colors
        if self.col_colors is not None:
            matrix, cmap = self.color_list_to_matrix_and_cmap(
                self.col_colors, xind, axis=1)
            # Get col_color labels
            if self.col_color_labels is not None:
                col_color_labels = self.col_color_labels
            else:
                col_color_labels = False
            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,
                    xticklabels=False, yticklabels=col_color_labels, **kws)
            # Adjust rotation of labels, place on right side
            if col_color_labels is not False:
                self.ax_col_colors.yaxis.tick_right()
                plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)
        else:
            despine(self.ax_col_colors, left=True, bottom=True)
    def plot_matrix(self, colorbar_kws, xind, yind, **kws):
        """Draw the main heatmap, reordered to match the clustering.

        Parameters
        ----------
        colorbar_kws : dict
            Keyword arguments for the heatmap's colorbar.
        xind, yind : list of int
            Reordered column / row indices from the dendrograms.
        kws : dict
            Remaining keyword arguments forwarded to :func:`heatmap`.
        """
        # Reorder data (and mask) into the clustered ordering before drawing.
        self.data2d = self.data2d.iloc[yind, xind]
        self.mask = self.mask.iloc[yind, xind]
        # Try to reorganize specified tick labels, if provided
        xtl = kws.pop("xticklabels", "auto")
        try:
            xtl = np.asarray(xtl)[xind]
        except (TypeError, IndexError):
            # "auto"/bool or non-indexable labels pass through unchanged
            pass
        ytl = kws.pop("yticklabels", "auto")
        try:
            ytl = np.asarray(ytl)[yind]
        except (TypeError, IndexError):
            pass
        # Reorganize the annotations to match the heatmap
        annot = kws.pop("annot", None)
        if annot is None or annot is False:
            pass
        else:
            if isinstance(annot, bool):
                # annot=True means "annotate with the (already reordered) data"
                annot_data = self.data2d
            else:
                annot_data = np.asarray(annot)
                if annot_data.shape != self.data2d.shape:
                    err = "`data` and `annot` must have same shape."
                    raise ValueError(err)
                # Apply the same row/column reordering to the annotations
                annot_data = annot_data[yind][:, xind]
            annot = annot_data
        # Setting ax_cbar=None in clustermap call implies no colorbar
        kws.setdefault("cbar", self.ax_cbar is not None)
        heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,
                cbar_kws=colorbar_kws, mask=self.mask,
                xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)
        # Move y tick labels to the right side, preserving whatever rotation
        # heatmap() gave them (if there are any labels at all).
        ytl = self.ax_heatmap.get_yticklabels()
        ytl_rot = None if not ytl else ytl[0].get_rotation()
        self.ax_heatmap.yaxis.set_ticks_position('right')
        self.ax_heatmap.yaxis.set_label_position('right')
        if ytl_rot is not None:
            ytl = self.ax_heatmap.get_yticklabels()
            plt.setp(ytl, rotation=ytl_rot)
        tight_params = dict(h_pad=.02, w_pad=.02)
        if self.ax_cbar is None:
            self._figure.tight_layout(**tight_params)
        else:
            # Turn the colorbar axes off for tight layout so that its
            # ticks don't interfere with the rest of the plot layout.
            # Then move it.
            self.ax_cbar.set_axis_off()
            self._figure.tight_layout(**tight_params)
            self.ax_cbar.set_axis_on()
            self.ax_cbar.set_position(self.cbar_pos)
def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,
row_linkage, col_linkage, tree_kws, **kws):
# heatmap square=True sets the aspect ratio on the axes, but that is
# not compatible with the multi-axes layout of clustergrid
if kws.get("square", False):
msg = "``square=True`` ignored in clustermap"
warnings.warn(msg)
kws.pop("square")
colorbar_kws = {} if colorbar_kws is None else colorbar_kws
self.plot_dendrograms(row_cluster, col_cluster, metric, method,
row_linkage=row_linkage, col_linkage=col_linkage,
tree_kws=tree_kws)
try:
xind = self.dendrogram_col.reordered_ind
except AttributeError:
xind = np.arange(self.data2d.shape[1])
try:
yind = self.dendrogram_row.reordered_ind
except AttributeError:
yind = np.arange(self.data2d.shape[0])
self.plot_colors(xind, yind, **kws)
self.plot_matrix(colorbar_kws, xind, yind, **kws)
return self
def clustermap(
    data, *,
    pivot_kws=None, method='average', metric='euclidean',
    z_score=None, standard_scale=None, figsize=(10, 10),
    cbar_kws=None, row_cluster=True, col_cluster=True,
    row_linkage=None, col_linkage=None,
    row_colors=None, col_colors=None, mask=None,
    dendrogram_ratio=.2, colors_ratio=0.03,
    cbar_pos=(.02, .8, .05, .18), tree_kws=None,
    **kwargs
):
    """
    Plot a matrix dataset as a hierarchically-clustered heatmap.

    This function requires scipy to be available.

    Parameters
    ----------
    data : 2D array-like
        Rectangular data for clustering. Cannot contain NAs.
    pivot_kws : dict, optional
        If `data` is a tidy dataframe, can provide keyword arguments for
        pivot to create a rectangular dataframe.
    method : str, optional
        Linkage method to use for calculating clusters. See
        :func:`scipy.cluster.hierarchy.linkage` documentation for more
        information.
    metric : str, optional
        Distance metric to use for the data. See
        :func:`scipy.spatial.distance.pdist` documentation for more options.
        To use different metrics (or methods) for rows and columns, you may
        construct each linkage matrix yourself and provide them as
        `{row,col}_linkage`.
    z_score : int or None, optional
        Either 0 (rows) or 1 (columns). Whether or not to calculate z-scores,
        z = (x - mean)/std, along that dimension so that each row (column)
        ends up with mean 0 and variance 1.
    standard_scale : int or None, optional
        Either 0 (rows) or 1 (columns). Whether or not to standardize that
        dimension: subtract the minimum and divide by the maximum.
    figsize : tuple of (width, height), optional
        Overall size of the figure.
    cbar_kws : dict, optional
        Keyword arguments to pass to `cbar_kws` in :func:`heatmap`, e.g. to
        add a label to the colorbar.
    {row,col}_cluster : bool, optional
        If ``True``, cluster the {rows, columns}.
    {row,col}_linkage : :class:`numpy.ndarray`, optional
        Precomputed linkage matrix for the rows or columns. See
        :func:`scipy.cluster.hierarchy.linkage` for specific formats.
    {row,col}_colors : list-like or pandas DataFrame/Series, optional
        List of colors to label for either the rows or columns. Useful to
        evaluate whether samples within a group are clustered together. Can
        use nested lists or a DataFrame for multiple color levels of
        labeling. If given as a :class:`pandas.DataFrame` or
        :class:`pandas.Series`, labels for the colors are extracted from the
        DataFrame's column names or from the name of the Series, and the
        colors are matched to the data by index.
    mask : bool array or DataFrame, optional
        If passed, data will not be shown in cells where `mask` is True.
        Cells with missing values are automatically masked. Only used for
        visualizing, not for calculating.
    {dendrogram,colors}_ratio : float, or pair of floats, optional
        Proportion of the figure size devoted to the two marginal elements.
        If a pair is given, they correspond to (row, col) ratios.
    cbar_pos : tuple of (left, bottom, width, height), optional
        Position of the colorbar axes in the figure. Setting to ``None``
        will disable the colorbar.
    tree_kws : dict, optional
        Parameters for the :class:`matplotlib.collections.LineCollection`
        that is used to plot the lines of the dendrogram tree.
    kwargs : other keyword arguments
        All other keyword arguments are passed to :func:`heatmap`.

    Returns
    -------
    :class:`ClusterGrid`
        A :class:`ClusterGrid` instance.

    See Also
    --------
    heatmap : Plot rectangular data as a color-encoded matrix.

    Notes
    -----
    The returned object has a ``savefig`` method that should be used if you
    want to save the figure object without clipping the dendrograms.

    To access the reordered row indices, use
    ``clustergrid.dendrogram_row.reordered_ind``; for column indices, use
    ``clustergrid.dendrogram_col.reordered_ind``.
    """
    if _no_scipy:
        raise RuntimeError("clustermap requires scipy to be available")

    # Build the multi-axes grid (heatmap + dendrograms + color tracks),
    # then delegate clustering and drawing to ClusterGrid.plot.
    grid = ClusterGrid(
        data, pivot_kws=pivot_kws, figsize=figsize,
        row_colors=row_colors, col_colors=col_colors,
        z_score=z_score, standard_scale=standard_scale,
        mask=mask, dendrogram_ratio=dendrogram_ratio,
        colors_ratio=colors_ratio, cbar_pos=cbar_pos)
    return grid.plot(
        metric=metric, method=method, colorbar_kws=cbar_kws,
        row_cluster=row_cluster, col_cluster=col_cluster,
        row_linkage=row_linkage, col_linkage=col_linkage,
        tree_kws=tree_kws, **kwargs)
| [
"numpy.nanpercentile",
"matplotlib.cm.get_cmap",
"scipy.cluster.hierarchy.linkage",
"matplotlib.pyplot.figure",
"numpy.product",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.colors.ListedColormap",
"pandas.DataFrame",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.setp",
"numpy.ndim... | [((1723, 1749), 'numpy.zeros', 'np.zeros', (['data.shape', 'bool'], {}), '(data.shape, bool)\n', (1731, 1749), True, 'import numpy as np\n'), ((1983, 2053), 'pandas.DataFrame', 'pd.DataFrame', (['mask'], {'index': 'data.index', 'columns': 'data.columns', 'dtype': 'bool'}), '(mask, index=data.index, columns=data.columns, dtype=bool)\n', (1995, 2053), True, 'import pandas as pd\n'), ((2623, 2638), 'pandas.isnull', 'pd.isnull', (['data'], {}), '(data)\n', (2632, 2638), True, 'import pandas as pd\n'), ((12197, 12223), 'matplotlib.pyplot.setp', 'plt.setp', (['ytl'], {'va': '"""center"""'}), "(ytl, va='center')\n", (12205, 12223), True, 'import matplotlib.pyplot as plt\n'), ((19589, 19598), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (19596, 19598), True, 'import matplotlib.pyplot as plt\n'), ((21890, 21959), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['self.array'], {'method': 'self.method', 'metric': 'self.metric'}), '(self.array, method=self.method, metric=self.metric)\n', (21907, 21959), False, 'from scipy.cluster import hierarchy\n'), ((23826, 23899), 'scipy.cluster.hierarchy.dendrogram', 'hierarchy.dendrogram', (['self.linkage'], {'no_plot': '(True)', 'color_threshold': '(-np.inf)'}), '(self.linkage, no_plot=True, color_threshold=-np.inf)\n', (23846, 23899), False, 'from scipy.cluster import hierarchy\n'), ((27883, 27892), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (27890, 27892), True, 'import matplotlib.pyplot as plt\n'), ((28711, 28738), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (28721, 28738), True, 'import matplotlib.pyplot as plt\n'), ((29796, 29888), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['nrows', 'ncols'], {'width_ratios': 'width_ratios', 'height_ratios': 'height_ratios'}), '(nrows, ncols, width_ratios=width_ratios, height_ratios=\n height_ratios)\n', (29813, 29888), False, 'from matplotlib import gridspec\n'), ((37195, 37216), 
'numpy.zeros', 'np.zeros', (['(m, n)', 'int'], {}), '((m, n), int)\n', (37203, 37216), True, 'import numpy as np\n'), ((3217, 3233), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (3227, 3233), True, 'import numpy as np\n'), ((3253, 3276), 'pandas.DataFrame', 'pd.DataFrame', (['plot_data'], {}), '(plot_data)\n', (3265, 3276), True, 'import pandas as pd\n'), ((3411, 3427), 'numpy.asarray', 'np.asarray', (['mask'], {}), '(mask)\n', (3421, 3427), True, 'import numpy as np\n'), ((8049, 8103), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', (['(center - vrange)', '(center + vrange)'], {}), '(center - vrange, center + vrange)\n', (8069, 8103), True, 'import matplotlib as mpl\n'), ((8169, 8197), 'numpy.linspace', 'np.linspace', (['cmin', 'cmax', '(256)'], {}), '(cmin, cmax, 256)\n', (8180, 8197), True, 'import numpy as np\n'), ((12368, 12402), 'matplotlib.pyplot.setp', 'plt.setp', (['xtl'], {'rotation': '"""vertical"""'}), "(xtl, rotation='vertical')\n", (12376, 12402), True, 'import matplotlib.pyplot as plt\n'), ((12456, 12492), 'matplotlib.pyplot.setp', 'plt.setp', (['ytl'], {'rotation': '"""horizontal"""'}), "(ytl, rotation='horizontal')\n", (12464, 12492), True, 'import matplotlib.pyplot as plt\n'), ((20256, 20272), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (20266, 20272), True, 'import numpy as np\n'), ((20292, 20311), 'pandas.DataFrame', 'pd.DataFrame', (['array'], {}), '(array)\n', (20304, 20311), True, 'import pandas as pd\n'), ((22530, 22608), 'fastcluster.linkage_vector', 'fastcluster.linkage_vector', (['self.array'], {'method': 'self.method', 'metric': 'self.metric'}), '(self.array, method=self.method, metric=self.metric)\n', (22556, 22608), False, 'import fastcluster\n'), ((22737, 22808), 'fastcluster.linkage', 'fastcluster.linkage', (['self.array'], {'method': 'self.method', 'metric': 'self.metric'}), '(self.array, method=self.method, metric=self.metric)\n', (22756, 22808), False, 'import fastcluster\n'), ((26010, 26046), 
'matplotlib.pyplot.setp', 'plt.setp', (['ytl'], {'rotation': '"""horizontal"""'}), "(ytl, rotation='horizontal')\n", (26018, 26046), True, 'import matplotlib.pyplot as plt\n'), ((26117, 26151), 'matplotlib.pyplot.setp', 'plt.setp', (['xtl'], {'rotation': '"""vertical"""'}), "(xtl, rotation='vertical')\n", (26125, 26151), True, 'import matplotlib.pyplot as plt\n'), ((28489, 28507), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (28501, 28507), True, 'import pandas as pd\n'), ((36685, 36713), 'matplotlib.colors.to_rgb', 'mpl.colors.to_rgb', (['colors[0]'], {}), '(colors[0])\n', (36702, 36713), True, 'import matplotlib as mpl\n'), ((42911, 42942), 'matplotlib.pyplot.setp', 'plt.setp', (['ytl'], {'rotation': 'ytl_rot'}), '(ytl, rotation=ytl_rot)\n', (42919, 42942), True, 'import matplotlib.pyplot as plt\n'), ((43834, 43852), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (43847, 43852), False, 'import warnings\n'), ((5691, 5708), 'numpy.asarray', 'np.asarray', (['annot'], {}), '(annot)\n', (5701, 5708), True, 'import numpy as np\n'), ((6632, 6662), 'numpy.nanpercentile', 'np.nanpercentile', (['calc_data', '(2)'], {}), '(calc_data, 2)\n', (6648, 6662), True, 'import numpy as np\n'), ((6704, 6724), 'numpy.nanmin', 'np.nanmin', (['calc_data'], {}), '(calc_data)\n', (6713, 6724), True, 'import numpy as np\n'), ((6796, 6827), 'numpy.nanpercentile', 'np.nanpercentile', (['calc_data', '(98)'], {}), '(calc_data, 98)\n', (6812, 6827), True, 'import numpy as np\n'), ((6869, 6889), 'numpy.nanmax', 'np.nanmax', (['calc_data'], {}), '(calc_data)\n', (6878, 6889), True, 'import numpy as np\n'), ((7195, 7216), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (7210, 7216), True, 'import matplotlib as mpl\n'), ((8655, 8671), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (8664, 8671), True, 'import numpy as np\n'), ((8678, 8695), 'numpy.arange', 'np.arange', (['height'], {}), '(height)\n', (8687, 8695), True, 'import 
numpy as np\n'), ((20817, 20846), 'numpy.arange', 'np.arange', (['self.data.shape[0]'], {}), '(self.data.shape[0])\n', (20826, 20846), True, 'import numpy as np\n'), ((35561, 35576), 'numpy.ndim', 'np.ndim', (['colors'], {}), '(colors)\n', (35568, 35576), True, 'import numpy as np\n'), ((41457, 41472), 'numpy.asarray', 'np.asarray', (['xtl'], {}), '(xtl)\n', (41467, 41472), True, 'import numpy as np\n'), ((41612, 41627), 'numpy.asarray', 'np.asarray', (['ytl'], {}), '(ytl)\n', (41622, 41627), True, 'import numpy as np\n'), ((41992, 42009), 'numpy.asarray', 'np.asarray', (['annot'], {}), '(annot)\n', (42002, 42009), True, 'import numpy as np\n'), ((44270, 44301), 'numpy.arange', 'np.arange', (['self.data2d.shape[1]'], {}), '(self.data2d.shape[1])\n', (44279, 44301), True, 'import numpy as np\n'), ((44418, 44449), 'numpy.arange', 'np.arange', (['self.data2d.shape[0]'], {}), '(self.data2d.shape[0])\n', (44427, 44449), True, 'import numpy as np\n'), ((7278, 7309), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['cmap'], {}), '(cmap)\n', (7303, 7309), True, 'import matplotlib as mpl\n'), ((7634, 7664), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['[np.nan]'], {}), '([np.nan])\n', (7654, 7664), True, 'import numpy as np\n'), ((9664, 9691), 'numpy.arange', 'np.arange', (['start', 'end', 'step'], {}), '(start, end, step)\n', (9673, 9691), True, 'import numpy as np\n'), ((23041, 23063), 'numpy.product', 'np.product', (['self.shape'], {}), '(self.shape)\n', (23051, 23063), True, 'import numpy as np\n'), ((23232, 23250), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (23245, 23250), False, 'import warnings\n'), ((9557, 9569), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (9566, 9569), True, 'import numpy as np\n')] |
""" Logging to Visdom server """
import numpy as np
import visdom
from .logger import Logger
class BaseVisdomLogger(Logger):
    '''
    The base class for logging output to Visdom.

    ***THIS CLASS IS ABSTRACT AND MUST BE SUBCLASSED***

    Note that the Visdom server is designed to also handle a server architecture,
    and therefore the Visdom server must be running at all times. The server can
    be started with
    $ python -m visdom.server
    and you probably want to run it from screen or tmux.
    '''

    @property
    def viz(self):
        """The underlying ``visdom.Visdom`` connection object."""
        return self._viz

    def __init__(self, fields=None, win=None, env=None, opts=None, port=8097, server="localhost"):
        """
        Args:
            fields: Paths into the trainer state to log (used by log_state).
            win: Existing Visdom window id to draw into, or None for a new one.
            env: Visdom environment name.
            opts: Plot options forwarded to Visdom (default: a fresh empty dict).
            port: Port of the running Visdom server.
            server: Hostname of the running Visdom server.
        """
        super(BaseVisdomLogger, self).__init__(fields)
        self.win = win
        self.env = env
        # Default to None rather than a mutable ``{}`` so that the options
        # dict is never shared between logger instances.
        self.opts = {} if opts is None else opts
        self._viz = visdom.Visdom(server="http://" + server, port=port)

    def log(self, *args, **kwargs):
        raise NotImplementedError(
            "log not implemented for BaseVisdomLogger, which is an abstract class.")

    def _viz_prototype(self, vis_fn):
        ''' Outputs a function which will log the arguments to Visdom in an appropriate way.

        Args:
            vis_fn: A function, such as self.vis.image
        '''
        def _viz_logger(*args, **kwargs):
            # Remember the window id so subsequent calls update the same plot
            self.win = vis_fn(*args,
                              win=self.win,
                              env=self.env,
                              opts=self.opts,
                              **kwargs)
        return _viz_logger

    def log_state(self, state):
        """ Gathers the stats from self.trainer.stats and passes them into
            self.log, as a list """
        results = []
        for field in self.fields:
            # Walk the nested dict path described by this field
            stat = state
            for f in field:
                stat = stat[f]
            results.append(stat)
        self.log(*results)
class VisdomSaver(object):
    ''' Serialize the state of the Visdom server to disk.

    Unless you have a fancy schedule, where different are saved with different frequencies,
    you probably only need one of these.
    '''

    def __init__(self, envs=None, port=8097, server="localhost"):
        """
        Args:
            envs: Environments to serialize (None lets the server decide).
            port: Port of the running Visdom server.
            server: Hostname of the running Visdom server.
        """
        super(VisdomSaver, self).__init__()
        self.envs = envs
        self.viz = visdom.Visdom(server="http://" + server, port=port)

    def save(self, *args, **kwargs):
        """Ask the Visdom server to write ``self.envs`` to disk."""
        self.viz.save(self.envs)
class VisdomLogger(BaseVisdomLogger):
    '''
    A generic Visdom class that works with the majority of Visdom plot types.
    '''

    def __init__(self, plot_type, fields=None, win=None, env=None, opts=None, port=8097, server="localhost"):
        '''
        Args:
            plot_type: The name of the plot type, in Visdom
            fields: Currently unused
            opts: Plot options forwarded to Visdom (default: a fresh empty dict).

        Examples:
            >>> # Image example
            >>> img_to_use = skimage.data.coffee().swapaxes(0,2).swapaxes(1,2)
            >>> image_logger = VisdomLogger('image')
            >>> image_logger.log(img_to_use)

            >>> # Histogram example
            >>> hist_data = np.random.rand(10000)
            >>> hist_logger = VisdomLogger('histogram', opts=dict(title='Random!', numbins=20))
            >>> hist_logger.log(hist_data)
        '''
        # Use None instead of a mutable ``{}`` default so the options dict is
        # not shared between logger instances.
        opts = {} if opts is None else opts
        super(VisdomLogger, self).__init__(fields, win, env, opts, port, server)
        self.plot_type = plot_type
        # Resolve the Visdom plotting callable by name (e.g. viz.image)
        self.chart = getattr(self.viz, plot_type)
        self.viz_logger = self._viz_prototype(self.chart)

    def log(self, *args, **kwargs):
        """Forward the arguments to the underlying Visdom plotting call."""
        self.viz_logger(*args, **kwargs)
class VisdomPlotLogger(BaseVisdomLogger):

    def __init__(self, plot_type, fields=None, win=None, env=None, opts=None, port=8097, server="localhost", name=None):
        '''
        Multiple lines can be added to the same plot with the "name" attribute (see example)

        Args:
            plot_type: {scatter, line}
            fields: Currently unused
            opts: Plot options forwarded to Visdom (default: a fresh empty dict).
            name: Currently unused; pass ``name=`` to ``log`` instead.

        Examples:
            >>> scatter_logger = VisdomPlotLogger('line')
            >>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name="train")
            >>> scatter_logger.log(stats['epoch'], loss_meter.value()[0], name="test")
        '''
        # Use None instead of a mutable ``{}`` default so the options dict is
        # not shared between logger instances.
        opts = {} if opts is None else opts
        super(VisdomPlotLogger, self).__init__(fields, win, env, opts, port, server)
        valid_plot_types = {
            "scatter": self.viz.scatter,
            "line": self.viz.line}
        self.plot_type = plot_type
        # Set chart type (membership test on the dict itself, not .keys())
        if plot_type not in valid_plot_types:
            raise ValueError("plot_type '{}' not found. Must be one of {}".format(
                plot_type, valid_plot_types.keys()))
        self.chart = valid_plot_types[plot_type]

    def log(self, *args, **kwargs):
        """Append an (x, y) point to the plot, creating the window on first use.

        Raises:
            ValueError: if the window already exists and args is not (x, y).
        """
        if self.win is not None and self.viz.win_exists(win=self.win, env=self.env):
            if len(args) != 2:
                raise ValueError("When logging to {}, must pass in x and y values (and optionally z).".format(
                    type(self)))
            x, y = args
            self.chart(
                X=np.array([x]),
                Y=np.array([y]),
                update='append',
                win=self.win,
                env=self.env,
                opts=self.opts,
                **kwargs)
        else:
            if self.plot_type == 'scatter':
                chart_args = {'X': np.array([args])}
            else:
                chart_args = {'X': np.array([args[0]]),
                              'Y': np.array([args[1]])}
            self.win = self.chart(
                win=self.win,
                env=self.env,
                opts=self.opts,
                **chart_args)
            # For some reason, the first point is a different trace. So for now
            # we can just add the point again, this time on the correct curve.
            self.log(*args, **kwargs)
class VisdomTextLogger(BaseVisdomLogger):
    '''Creates a text window in visdom and logs output to it.

    The output can be formatted with fancy HTML, and it new output can
    be set to 'append' or 'replace' mode.

    Args:
        fields: Currently not used
        update_type: One of {'REPLACE', 'APPEND'}. Default 'REPLACE'.

    For examples, make sure that your visdom server is running.

    Example:
        >>> notes_logger = VisdomTextLogger(update_type='APPEND')
        >>> for i in range(10):
        >>>     notes_logger.log("Printing: {} of {}".format(i+1, 10))
        # results will be in Visdom environment (default: http://localhost:8097)
    '''
    valid_update_types = ['REPLACE', 'APPEND']

    def __init__(self, fields=None, win=None, env=None, opts=None, update_type=valid_update_types[0],
                 port=8097, server="localhost"):
        # Use None instead of a mutable ``{}`` default so the options dict is
        # not shared between logger instances.
        opts = {} if opts is None else opts
        super(VisdomTextLogger, self).__init__(fields, win, env, opts, port, server)
        self.text = ''
        if update_type not in self.valid_update_types:
            raise ValueError("update type '{}' not found. Must be one of {}".format(
                update_type, self.valid_update_types))
        self.update_type = update_type
        self.viz_logger = self._viz_prototype(self.viz.text)

    def log(self, msg, *args, **kwargs):
        """Send ``msg`` to the text window, appending or replacing per update_type."""
        text = msg
        if self.update_type == 'APPEND' and self.text:
            self.text = "<br>".join([self.text, text])
        else:
            self.text = text
        self.viz_logger([self.text])

    def _log_all(self, stats, log_fields, prefix=None, suffix=None, require_dict=False):
        """Format every configured field from ``stats`` and log one joined line."""
        results = []
        for field_idx, field in enumerate(self.fields):
            # Walk the nested dict path, remembering the parent for formatting
            parent, stat = None, stats
            for f in field:
                parent, stat = stat, stat[f]
            name, output = self._gather_outputs(field, log_fields,
                                                parent, stat, require_dict)
            if not output:
                continue
            self._align_output(field_idx, output)
            results.append((name, output))
        if not results:
            return
        output = self._join_results(results)
        if prefix is not None:
            self.log(prefix)
        self.log(output)
        if suffix is not None:
            self.log(suffix)

    def _align_output(self, field_idx, output):
        """Pad each output column (in place) to the widest value seen so far."""
        for output_idx, o in enumerate(output):
            if len(o) < self.field_widths[field_idx][output_idx]:
                num_spaces = self.field_widths[field_idx][output_idx] - len(o)
                output[output_idx] += ' ' * num_spaces
            else:
                self.field_widths[field_idx][output_idx] = len(o)

    def _join_results(self, results):
        """Join (name, outputs) pairs into one tab-separated 'name: values' line."""
        joined_out = map(lambda i: (i[0], ' '.join(i[1])), results)
        joined_fields = map(lambda i: '{}: {}'.format(i[0], i[1]), joined_out)
        return '\t'.join(joined_fields)

    def _gather_outputs(self, field, log_fields, stat_parent, stat, require_dict=False):
        """Extract (display name, formatted output strings) for one stat entry."""
        output = []
        name = ''
        if isinstance(stat, dict):
            # Dict stats carry their own format strings under ``log_fields``
            log_fields = stat.get(log_fields, [])
            name = stat.get('log_name', '.'.join(field))
            for f in log_fields:
                output.append(f.format(**stat))
        elif not require_dict:
            # Scalar stat: format using parent-provided format/unit hints
            name = '.'.join(field)
            number_format = stat_parent.get('log_format', '')
            unit = stat_parent.get('log_unit', '')
            fmt = '{' + number_format + '}' + unit
            output.append(fmt.format(stat))
        return name, output
| [
"numpy.array",
"visdom.Visdom"
] | [((852, 903), 'visdom.Visdom', 'visdom.Visdom', ([], {'server': "('http://' + server)", 'port': 'port'}), "(server='http://' + server, port=port)\n", (865, 903), False, 'import visdom\n'), ((2347, 2398), 'visdom.Visdom', 'visdom.Visdom', ([], {'server': "('http://' + server)", 'port': 'port'}), "(server='http://' + server, port=port)\n", (2360, 2398), False, 'import visdom\n'), ((5166, 5179), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (5174, 5179), True, 'import numpy as np\n'), ((5199, 5212), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (5207, 5212), True, 'import numpy as np\n'), ((5458, 5474), 'numpy.array', 'np.array', (['[args]'], {}), '([args])\n', (5466, 5474), True, 'import numpy as np\n'), ((5529, 5548), 'numpy.array', 'np.array', (['[args[0]]'], {}), '([args[0]])\n', (5537, 5548), True, 'import numpy as np\n'), ((5585, 5604), 'numpy.array', 'np.array', (['[args[1]]'], {}), '([args[1]])\n', (5593, 5604), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from collections import *
import gym
from gym import spaces
import numpy as np
import pybullet as p
import sys
import time
np.set_printoptions(precision=3, suppress=True, linewidth=10000)
def add_opts(parser):
  """Register BulletCartpole's command line options on an argparse parser."""
  add = parser.add_argument
  # rendering / debugging
  add('--gui', action='store_true')
  add('--delay', type=float, default=0.0)
  # physics / episode dynamics
  add('--action-force', type=float, default=50.0,
      help="magnitude of action force applied per step")
  add('--initial-force', type=float, default=55.0,
      help="magnitude of initial push, in random direction")
  add('--no-random-theta', action='store_true')
  add('--action-repeats', type=int, default=2,
      help="number of action repeats")
  add('--steps-per-repeat', type=int, default=5,
      help="number of sim steps per repeat")
  add('--num-cameras', type=int, default=1,
      help="how many camera points to render; 1 or 2")
  add('--event-log-out', type=str, default=None,
      help="path to record event log.")
  add('--max-episode-len', type=int, default=200,
      help="maximum episode len for cartpole")
  # observation / reward configuration
  add('--use-raw-pixels', action='store_true',
      help="use raw pixels as state instead of cart/pole poses")
  add('--render-width', type=int, default=50,
      help="if --use-raw-pixels render with this width")
  add('--render-height', type=int, default=50,
      help="if --use-raw-pixels render with this height")
  add('--reward-calc', type=str, default='fixed',
      help="'fixed': 1 per step. 'angle': 2*max_angle - ox - oy. 'action': 1.5 - |action|. 'angle_action': both angle and action")
def state_fields_of_pose_of(body_id):
  """Return a 7d numpy pose (x, y, z, qa, qb, qc, qd) for a bullet body."""
  position, orientation = p.getBasePositionAndOrientation(body_id)
  return np.array(position + orientation)
class BulletCartpole(gym.Env):
  def __init__(self, opts, discrete_actions):
    """Configure the env and connect to the bullet simulation.

    Args:
      opts: parsed argparse namespace; see add_opts for the expected fields.
      discrete_actions: if True the action space is Discrete(5)
        (noop / left / right / up / down), otherwise a continuous
        Box of (fx, fy) force pairs.
    """
    self.gui = opts.gui
    # only honour --delay when a GUI is up; headless runs shouldn't sleep
    self.delay = opts.delay if self.gui else 0.0
    self.max_episode_len = opts.max_episode_len
    # threshold for pole position.
    # if absolute x or y moves outside this we finish episode
    self.pos_threshold = 2.0 # TODO: higher?
    # threshold for angle from z-axis.
    # if x or y > this value we finish episode.
    self.angle_threshold = 0.3 # radians; ~= 12deg
    # force to apply per action simulation step.
    # in the discrete case this is the fixed force applied
    # in the continuous case each x/y is in range (-F, F)
    self.action_force = opts.action_force
    # initial push force. this should be enough that taking no action will always
    # result in pole falling after initial_force_steps but not so much that you
    # can't recover. see also initial_force_steps.
    self.initial_force = opts.initial_force
    # number of sim steps initial force is applied.
    # (see initial_force)
    self.initial_force_steps = 30
    # whether we do initial push in a random direction
    # if false we always push along the x-axis (simpler problem, useful for debugging)
    self.random_theta = not opts.no_random_theta
    # true if action space is discrete; 5 values; no push, left, right, up & down
    # false if action space is continuous; fx, fy both (-action_force, action_force)
    self.discrete_actions = discrete_actions
    # 5 discrete actions: no push, left, right, up, down
    # 2 continuous action elements; fx & fy
    if self.discrete_actions:
      self.action_space = spaces.Discrete(5)
    else:
      self.action_space = spaces.Box(-1.0, 1.0, shape=(1, 2))
    # open event log (project-local module; imported lazily so it is only
    # required when --event-log-out is actually set)
    if opts.event_log_out:
      import event_log
      self.event_log = event_log.EventLog(opts.event_log_out, opts.use_raw_pixels)
    else:
      self.event_log = None
    # how many times to repeat each action per step().
    # and how many sim steps to do per state capture
    # (total number of sim steps = action_repeats * steps_per_repeat)
    self.repeats = opts.action_repeats
    self.steps_per_repeat = opts.steps_per_repeat
    # how many cameras to render?
    # if 1 just render from front
    # if 2 render from front and 90deg side
    if opts.num_cameras not in [1, 2]:
      raise ValueError("--num-cameras must be 1 or 2")
    self.num_cameras = opts.num_cameras
    # whether we are using raw pixels for state or just pole + cart pose
    self.use_raw_pixels = opts.use_raw_pixels
    # if use_raw_pixels is set we will be rendering at this resolution
    self.render_width = opts.render_width
    self.render_height = opts.render_height
    # decide observation space
    if self.use_raw_pixels:
      # in high dimensional case each observation is an RGB image (H, W, 3)
      # we have R repeats and C cameras resulting in (H, W, 3, C, R)
      # final state fed to network is concatenated in depth => (H, W, 3*R*C)
      state_shape = (self.render_height, self.render_width, 3,
                     self.num_cameras, self.repeats)
    else:
      # in the low dimensional case obs space for problem is (R, 2, 7)
      #  R = number of repeats
      #  2 = two items; cart & pole
      #  7d tuple for pos + orientation pose
      state_shape = (self.repeats, 2, 7)
    float_max = np.finfo(np.float32).max
    self.observation_space = gym.spaces.Box(-float_max, float_max, state_shape)
    # check reward type
    assert opts.reward_calc in ['fixed', 'angle', 'action', 'angle_action']
    self.reward_calc = opts.reward_calc
    # no state until reset.
    self.state = np.empty(state_shape, dtype=np.float32)
    # setup bullet: connect, gravity, then load ground + cart + pole URDFs
    p.connect(p.GUI if self.gui else p.DIRECT)
    p.setGravity(0, 0, -9.81)
    p.loadURDF("models/ground.urdf", 0,0,0, 0,0,0,1)
    self.cart = p.loadURDF("models/cart.urdf", 0,0,0.08, 0,0,0,1)
    self.pole = p.loadURDF("models/pole.urdf", 0,0,0.35, 0,0,0,1)
  def _configure(self, display=None):
    # gym.Env hook; no per-display configuration is needed for this env.
    pass
  def _seed(self, seed=None):
    # gym.Env hook; seeding is not implemented (initial push uses np.random
    # directly in _reset).
    pass
  def _render(self, mode, close):
    # gym.Env hook; rendering goes through the bullet GUI (--gui) or
    # render_rgb for pixel observations, so this is a no-op.
    pass
  def _step(self, action):
    """Apply action forces, advance the sim, and return (state, reward, done, info).

    NOTE(review): this method uses python2-only syntax (``print >>``, ``xrange``);
    it will not run under python3 as written.
    """
    if self.done:
      print >>sys.stderr, "calling step after done????"
      return np.copy(self.state), 0, True, {}
    info = {}
    # based on action decide the x and y forces
    fx = fy = 0
    if self.discrete_actions:
      if action == 0:
        pass
      elif action == 1:
        fx = self.action_force
      elif action == 2:
        fx = -self.action_force
      elif action == 3:
        fy = self.action_force
      elif action == 4:
        fy = -self.action_force
      else:
        raise Exception("unknown discrete action [%s]" % action)
    else:
      fx, fy = action[0] * self.action_force
    # step simulation forward. at the end of each repeat we set part of the step's
    # state by capture the cart & pole state in some form.
    for r in xrange(self.repeats):
      for _ in xrange(self.steps_per_repeat):
        p.stepSimulation()
        p.applyExternalForce(self.cart, -1, (fx,fy,0), (0,0,0), p.WORLD_FRAME)
        if self.delay > 0:
          time.sleep(self.delay)
      self.set_state_element_for_repeat(r)
    self.steps += 1
    # Check for out of bounds by position or orientation on pole.
    # we (re)fetch pose explicitly rather than depending on fields in state.
    (x, y, _z), orient = p.getBasePositionAndOrientation(self.pole)
    ox, oy, _oz = p.getEulerFromQuaternion(orient)  # roll / pitch / yaw
    if abs(x) > self.pos_threshold or abs(y) > self.pos_threshold:
      info['done_reason'] = 'out of position bounds'
      self.done = True
      # NOTE(review): this reward assignment is dead — it is unconditionally
      # overwritten by ``reward = 1.0`` below. Confirm whether a 0 terminal
      # reward was intended before changing anything here.
      reward = 0.0
    elif abs(ox) > self.angle_threshold or abs(oy) > self.angle_threshold:
      # TODO: probably better to do explicit angle from z?
      info['done_reason'] = 'out of orientation bounds'
      self.done = True
      # NOTE(review): dead store, see above.
      reward = 0.0
    # check for end of episode (by length)
    if self.steps >= self.max_episode_len:
      info['done_reason'] = 'episode length'
      self.done = True
    # calc reward, fixed base of 1.0
    reward = 1.0
    if self.reward_calc == "angle" or self.reward_calc == "angle_action":
      # clip to zero since angles can be past threshold
      reward += max(0, 2 * self.angle_threshold - np.abs(ox) - np.abs(oy))
    if self.reward_calc == "action" or self.reward_calc == "angle_action":
      # max norm will be sqr(2) ~= 1.4.
      # reward is already 1.0 to add another 0.5 as o0.1 buffer from zero
      reward += 0.5 - np.linalg.norm(action[0])
    # log this event.
    # TODO in the --use-raw-pixels case would be nice to have poses in state repeats too.
    if self.event_log:
      self.event_log.add(self.state, action, reward)
    # return observation
    return np.copy(self.state), reward, self.done, info
def render_rgb(self, camera_idx):
cameraPos = [(0.0, 0.75, 0.75), (0.75, 0.0, 0.75)][camera_idx]
targetPos = (0, 0, 0.3)
cameraUp = (0, 0, 1)
nearVal, farVal = 1, 20
fov = 60
_w, _h, rgba, _depth, _objects = p.renderImage(self.render_width, self.render_height,
cameraPos, targetPos, cameraUp,
nearVal, farVal, fov)
# convert from 1d uint8 array to (H,W,3) hacky hardcode whitened float16 array.
# TODO: for storage concerns could just store this as uint8 (which it is)
# and normalise 0->1 + whiten later.
rgba_img = np.reshape(np.asarray(rgba, dtype=np.float16),
(self.render_height, self.render_width, 4))
rgb_img = rgba_img[:,:,:3] # slice off alpha, always 1.0
rgb_img /= 255
return rgb_img
def set_state_element_for_repeat(self, repeat):
if self.use_raw_pixels:
# high dim caseis (H, W, 3, C, R)
# H, W, 3 -> height x width, 3 channel RGB image
# C -> camera_idx; 0 or 1
# R -> repeat
for camera_idx in range(self.num_cameras):
self.state[:,:,:,camera_idx,repeat] = self.render_rgb(camera_idx)
else:
# in low dim case state is (R, 2, 7)
# R -> repeat, 2 -> 2 objects (cart & pole), 7 -> 7d pose
self.state[repeat][0] = state_fields_of_pose_of(self.cart)
self.state[repeat][1] = state_fields_of_pose_of(self.pole)
def _reset(self):
# reset state
self.steps = 0
self.done = False
# reset pole on cart in starting poses
p.resetBasePositionAndOrientation(self.cart, (0,0,0.08), (0,0,0,1))
p.resetBasePositionAndOrientation(self.pole, (0,0,0.35), (0,0,0,1))
for _ in xrange(100): p.stepSimulation()
# give a fixed force push in a random direction to get things going...
theta = (np.random.random() * 2 * np.pi) if self.random_theta else 0.0
fx, fy = self.initial_force * np.cos(theta), self.initial_force * np.sin(theta)
for _ in xrange(self.initial_force_steps):
p.stepSimulation()
p.applyExternalForce(self.cart, -1, (fx, fy, 0), (0, 0, 0), p.WORLD_FRAME)
if self.delay > 0:
time.sleep(self.delay)
# bootstrap state by running for all repeats
for i in xrange(self.repeats):
self.set_state_element_for_repeat(i)
# reset event log (if applicable) and add entry with only state
if self.event_log:
self.event_log.reset()
self.event_log.add_just_state(self.state)
# return this state
return np.copy(self.state)
| [
"numpy.abs",
"pybullet.renderImage",
"numpy.empty",
"pybullet.applyExternalForce",
"gym.spaces.Discrete",
"numpy.sin",
"numpy.linalg.norm",
"pybullet.connect",
"numpy.set_printoptions",
"numpy.copy",
"pybullet.setGravity",
"event_log.EventLog",
"numpy.finfo",
"pybullet.resetBasePositionAnd... | [((147, 211), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)', 'linewidth': '(10000)'}), '(precision=3, suppress=True, linewidth=10000)\n', (166, 211), True, 'import numpy as np\n'), ((2008, 2048), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['body_id'], {}), '(body_id)\n', (2039, 2048), True, 'import pybullet as p\n'), ((2058, 2089), 'numpy.array', 'np.array', (['[x, y, z, a, b, c, d]'], {}), '([x, y, z, a, b, c, d])\n', (2066, 2089), True, 'import numpy as np\n'), ((5476, 5526), 'gym.spaces.Box', 'gym.spaces.Box', (['(-float_max)', 'float_max', 'state_shape'], {}), '(-float_max, float_max, state_shape)\n', (5490, 5526), False, 'import gym\n'), ((5714, 5753), 'numpy.empty', 'np.empty', (['state_shape'], {'dtype': 'np.float32'}), '(state_shape, dtype=np.float32)\n', (5722, 5753), True, 'import numpy as np\n'), ((5778, 5820), 'pybullet.connect', 'p.connect', (['(p.GUI if self.gui else p.DIRECT)'], {}), '(p.GUI if self.gui else p.DIRECT)\n', (5787, 5820), True, 'import pybullet as p\n'), ((5825, 5850), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.81)'], {}), '(0, 0, -9.81)\n', (5837, 5850), True, 'import pybullet as p\n'), ((5855, 5908), 'pybullet.loadURDF', 'p.loadURDF', (['"""models/ground.urdf"""', '(0)', '(0)', '(0)', '(0)', '(0)', '(0)', '(1)'], {}), "('models/ground.urdf', 0, 0, 0, 0, 0, 0, 1)\n", (5865, 5908), True, 'import pybullet as p\n'), ((5920, 5974), 'pybullet.loadURDF', 'p.loadURDF', (['"""models/cart.urdf"""', '(0)', '(0)', '(0.08)', '(0)', '(0)', '(0)', '(1)'], {}), "('models/cart.urdf', 0, 0, 0.08, 0, 0, 0, 1)\n", (5930, 5974), True, 'import pybullet as p\n'), ((5986, 6040), 'pybullet.loadURDF', 'p.loadURDF', (['"""models/pole.urdf"""', '(0)', '(0)', '(0.35)', '(0)', '(0)', '(0)', '(1)'], {}), "('models/pole.urdf', 0, 0, 0.35, 0, 0, 0, 1)\n", (5996, 6040), True, 'import pybullet as p\n'), ((7437, 7479), 
'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.pole'], {}), '(self.pole)\n', (7468, 7479), True, 'import pybullet as p\n'), ((7498, 7530), 'pybullet.getEulerFromQuaternion', 'p.getEulerFromQuaternion', (['orient'], {}), '(orient)\n', (7522, 7530), True, 'import pybullet as p\n'), ((9104, 9214), 'pybullet.renderImage', 'p.renderImage', (['self.render_width', 'self.render_height', 'cameraPos', 'targetPos', 'cameraUp', 'nearVal', 'farVal', 'fov'], {}), '(self.render_width, self.render_height, cameraPos, targetPos,\n cameraUp, nearVal, farVal, fov)\n', (9117, 9214), True, 'import pybullet as p\n'), ((10472, 10544), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.cart', '(0, 0, 0.08)', '(0, 0, 0, 1)'], {}), '(self.cart, (0, 0, 0.08), (0, 0, 0, 1))\n', (10505, 10544), True, 'import pybullet as p\n'), ((10544, 10616), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.pole', '(0, 0, 0.35)', '(0, 0, 0, 1)'], {}), '(self.pole, (0, 0, 0.35), (0, 0, 0, 1))\n', (10577, 10616), True, 'import pybullet as p\n'), ((11434, 11453), 'numpy.copy', 'np.copy', (['self.state'], {}), '(self.state)\n', (11441, 11453), True, 'import numpy as np\n'), ((3713, 3731), 'gym.spaces.Discrete', 'spaces.Discrete', (['(5)'], {}), '(5)\n', (3728, 3731), False, 'from gym import spaces\n'), ((3768, 3803), 'gym.spaces.Box', 'spaces.Box', (['(-1.0)', '(1.0)'], {'shape': '(1, 2)'}), '(-1.0, 1.0, shape=(1, 2))\n', (3778, 3803), False, 'from gym import spaces\n'), ((3899, 3958), 'event_log.EventLog', 'event_log.EventLog', (['opts.event_log_out', 'opts.use_raw_pixels'], {}), '(opts.event_log_out, opts.use_raw_pixels)\n', (3917, 3958), False, 'import event_log\n'), ((5422, 5442), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (5430, 5442), True, 'import numpy as np\n'), ((8824, 8843), 'numpy.copy', 'np.copy', (['self.state'], {}), '(self.state)\n', (8831, 8843), True, 'import 
numpy as np\n'), ((9542, 9576), 'numpy.asarray', 'np.asarray', (['rgba'], {'dtype': 'np.float16'}), '(rgba, dtype=np.float16)\n', (9552, 9576), True, 'import numpy as np\n'), ((10638, 10656), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (10654, 10656), True, 'import pybullet as p\n'), ((10945, 10963), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (10961, 10963), True, 'import pybullet as p\n'), ((10970, 11044), 'pybullet.applyExternalForce', 'p.applyExternalForce', (['self.cart', '(-1)', '(fx, fy, 0)', '(0, 0, 0)', 'p.WORLD_FRAME'], {}), '(self.cart, -1, (fx, fy, 0), (0, 0, 0), p.WORLD_FRAME)\n', (10990, 11044), True, 'import pybullet as p\n'), ((6283, 6302), 'numpy.copy', 'np.copy', (['self.state'], {}), '(self.state)\n', (6290, 6302), True, 'import numpy as np\n'), ((7047, 7065), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (7063, 7065), True, 'import pybullet as p\n'), ((7074, 7148), 'pybullet.applyExternalForce', 'p.applyExternalForce', (['self.cart', '(-1)', '(fx, fy, 0)', '(0, 0, 0)', 'p.WORLD_FRAME'], {}), '(self.cart, -1, (fx, fy, 0), (0, 0, 0), p.WORLD_FRAME)\n', (7094, 7148), True, 'import pybullet as p\n'), ((8572, 8597), 'numpy.linalg.norm', 'np.linalg.norm', (['action[0]'], {}), '(action[0])\n', (8586, 8597), True, 'import numpy as np\n'), ((10842, 10855), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10848, 10855), True, 'import numpy as np\n'), ((10878, 10891), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10884, 10891), True, 'import numpy as np\n'), ((11078, 11100), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (11088, 11100), False, 'import time\n'), ((7182, 7204), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (7192, 7204), False, 'import time\n'), ((8349, 8359), 'numpy.abs', 'np.abs', (['oy'], {}), '(oy)\n', (8355, 8359), True, 'import numpy as np\n'), ((10746, 10764), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', 
(10762, 10764), True, 'import numpy as np\n'), ((8336, 8346), 'numpy.abs', 'np.abs', (['ox'], {}), '(ox)\n', (8342, 8346), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `hotelling` package."""
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
from hotelling.stats import hotelling_t2
def test_hotelling_test_array_two_sample():
    """Two-sample Hotelling T-squared test on plain numpy arrays."""
    group_a = np.asarray(
        [[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65, 105, 24]]
    )
    group_b = np.asarray(
        [[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175], [143, 205, 42]]
    )
    stats = hotelling_t2(group_a, group_b)
    assert round(stats[0], 4) == 11.1037  # T2 statistic
    assert round(stats[1], 4) == 2.7759  # F statistic
    assert round(stats[2], 5) == 0.15004  # p value
def test_hotelling_test_df_two_sample():
    """Two-sample Hotelling T-squared test on pandas DataFrames."""
    group_a = pd.DataFrame(
        [[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65, 105, 24]]
    )
    group_b = pd.DataFrame(
        [[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175], [143, 205, 42]]
    )
    stats = hotelling_t2(group_a, group_b)
    assert round(stats[0], 4) == 11.1037  # T2 statistic
    assert round(stats[1], 4) == 2.7759  # F statistic
    assert round(stats[2], 5) == 0.15004  # p value
def test_hotelling_test_df_two_sample_no_bessel():
    """Two-sample test with bessel=False: same T2, different F and p."""
    group_a = pd.DataFrame(
        [[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65, 105, 24]]
    )
    group_b = pd.DataFrame(
        [[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175], [143, 205, 42]]
    )
    stats = hotelling_t2(group_a, group_b, bessel=False)
    assert round(stats[0], 4) == 11.1037  # T2 statistic
    assert round(stats[1], 4) == 2.2207  # F statistic
    assert round(stats[2], 5) == 0.17337  # p value
def test_nutrients_data_integrity_means_procedure():
    """Sanity-check the 1985 USDA nutrient data against SAS MEANS output.

    NOTE(fix): the ``check_less_precise`` keyword was deprecated in pandas
    1.1 and removed in pandas 2.0; ``check_less_precise=7`` is replaced by
    the equivalent ``rtol=atol=0.5e-7`` (pandas maps ``k`` to ``0.5 * 10**-k``).
    """
    df = pd.read_csv('data/nutrient.txt', delimiter=' ', skipinitialspace=True, index_col=0)
    res = df.describe().T
    # the five nutrient columns, shared by every expected Series below
    cols = pd.Index(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')
    assert (res['count'] == [737, 737, 737, 737, 737]).all()
    # mean
    assert_series_equal(res['mean'],
                        pd.Series([624.0492537, 11.1298996, 65.8034410, 839.6353460, 78.9284464],
                                  name='mean',
                                  index=cols),
                        rtol=0.5e-7, atol=0.5e-7)
    # for the next one, SAS displays 1633.54 for 'a' - that is an error, inconsistent. everything else is 7 digits
    # standard deviation
    assert_series_equal(res['std'],
                        pd.Series([397.2775401, 5.9841905, 30.5757564, 1633.5398283, 73.5952721],
                                  name='std',
                                  index=cols),
                        rtol=0.5e-7, atol=0.5e-7)
    # min
    assert_series_equal(res['min'],
                        pd.Series([7.4400000, 0, 0, 0, 0],
                                  name='min',
                                  index=cols),
                        rtol=0.5e-7, atol=0.5e-7)
    # max
    assert_series_equal(res['max'],
                        pd.Series([2866.44, 58.6680000, 251.0120000, 34434.27, 433.3390000],
                                  name='max',
                                  index=cols),
                        rtol=0.5e-7, atol=0.5e-7)
def test_nutrient_data_corr_procedure():
    """Check covariance and Pearson correlation against SAS CORR output.

    NOTE(fix): the ``check_less_precise`` keyword was deprecated in pandas
    1.1 and removed in pandas 2.0; ``check_less_precise=3`` is replaced by
    the equivalent ``rtol=atol=0.5e-3`` (pandas maps ``k`` to ``0.5 * 10**-k``).
    """
    df = pd.read_csv('data/nutrient.txt', delimiter=' ', skipinitialspace=True, index_col=0)
    # the five nutrient columns, used for both index and columns below
    cols = pd.Index(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')
    # Covariance matrix
    cov = df.cov()
    assert_frame_equal(cov,
                       pd.DataFrame([[157829.444, 940.089, 6075.816, 102411.127, 6701.616],
                                     [940.089, 35.811, 114.058, 2383.153, 137.672],
                                     [6075.816, 114.058, 934.877, 7330.052, 477.200],
                                     [102411.127, 2383.153, 7330.052, 2668452.371, 22063.249],
                                     [6701.616, 137.672, 477.200, 22063.249, 5416.264]
                                     ],
                                    index=cols,
                                    columns=cols),
                       rtol=0.5e-3, atol=0.5e-3)
    # Pearson Correlation
    corr = df.corr()
    assert_frame_equal(corr,
                       pd.DataFrame([[1.00000, 0.39543, 0.50019, 0.15781, 0.22921],
                                     [0.39543, 1.00000, 0.62337, 0.24379, 0.31260],
                                     [0.50019, 0.62337, 1.00000, 0.14676, 0.21207],
                                     [0.15781, 0.24379, 0.14676, 1.00000, 0.18352],
                                     [0.22921, 0.31260, 0.21207, 0.18352, 1.00000]
                                     ],
                                    index=cols,
                                    columns=cols),
                       rtol=0.5e-3, atol=0.5e-3)
def test_mu0():
    """test_mu0

    One sample T-squared test. Hypothesis tested: mu = mu0
    :return:
    """
    # mu0 is the USDA recommended daily intake
    recommended_intake = np.array([1000, 15, 60, 800, 75])
    # 1985 USDA data
    nutrients = pd.read_csv('data/nutrient.txt', delimiter=' ', skipinitialspace=True, index_col=0)
    stats = hotelling_t2(nutrients, recommended_intake)
    assert round(stats[0], 4) == 1758.5413  # T2
    assert round(stats[1], 4) == 349.7968  # F
    assert round(stats[2], 4) == 0.0000  # p-value
| [
"pandas.DataFrame",
"pandas.read_csv",
"numpy.asarray",
"pandas.Index",
"hotelling.stats.hotelling_t2",
"numpy.array"
] | [((301, 392), 'numpy.asarray', 'np.asarray', (['[[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65, 105, 24]]'], {}), '([[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65,\n 105, 24]])\n', (311, 392), True, 'import numpy as np\n'), ((397, 494), 'numpy.asarray', 'np.asarray', (['[[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175], [143, 205,\n 42]]'], {}), '([[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175],\n [143, 205, 42]])\n', (407, 494), True, 'import numpy as np\n'), ((501, 519), 'hotelling.stats.hotelling_t2', 'hotelling_t2', (['x', 'y'], {}), '(x, y)\n', (513, 519), False, 'from hotelling.stats import hotelling_t2\n'), ((709, 803), 'pandas.DataFrame', 'pd.DataFrame', (['[[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65, 105, 24]]'], {}), '([[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [\n 65, 105, 24]])\n', (721, 803), True, 'import pandas as pd\n'), ((807, 907), 'pandas.DataFrame', 'pd.DataFrame', (['[[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175], [143, 205,\n 42]]'], {}), '([[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, \n 175], [143, 205, 42]])\n', (819, 907), True, 'import pandas as pd\n'), ((913, 931), 'hotelling.stats.hotelling_t2', 'hotelling_t2', (['x', 'y'], {}), '(x, y)\n', (925, 931), False, 'from hotelling.stats import hotelling_t2\n'), ((1131, 1225), 'pandas.DataFrame', 'pd.DataFrame', (['[[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [65, 105, 24]]'], {}), '([[23, 45, 15], [40, 85, 18], [215, 307, 60], [110, 110, 50], [\n 65, 105, 24]])\n', (1143, 1225), True, 'import pandas as pd\n'), ((1229, 1329), 'pandas.DataFrame', 'pd.DataFrame', (['[[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, 175], [143, 205,\n 42]]'], {}), '([[277, 230, 63], [153, 80, 29], [306, 440, 105], [252, 350, \n 175], [143, 205, 42]])\n', (1241, 1329), True, 'import pandas as pd\n'), ((1335, 1367), 'hotelling.stats.hotelling_t2', 
'hotelling_t2', (['x', 'y'], {'bessel': '(False)'}), '(x, y, bessel=False)\n', (1347, 1367), False, 'from hotelling.stats import hotelling_t2\n'), ((1559, 1646), 'pandas.read_csv', 'pd.read_csv', (['"""data/nutrient.txt"""'], {'delimiter': '""" """', 'skipinitialspace': '(True)', 'index_col': '(0)'}), "('data/nutrient.txt', delimiter=' ', skipinitialspace=True,\n index_col=0)\n", (1570, 1646), True, 'import pandas as pd\n'), ((3252, 3339), 'pandas.read_csv', 'pd.read_csv', (['"""data/nutrient.txt"""'], {'delimiter': '""" """', 'skipinitialspace': '(True)', 'index_col': '(0)'}), "('data/nutrient.txt', delimiter=' ', skipinitialspace=True,\n index_col=0)\n", (3263, 3339), True, 'import pandas as pd\n'), ((5136, 5169), 'numpy.array', 'np.array', (['[1000, 15, 60, 800, 75]'], {}), '([1000, 15, 60, 800, 75])\n', (5144, 5169), True, 'import numpy as np\n'), ((5201, 5288), 'pandas.read_csv', 'pd.read_csv', (['"""data/nutrient.txt"""'], {'delimiter': '""" """', 'skipinitialspace': '(True)', 'index_col': '(0)'}), "('data/nutrient.txt', delimiter=' ', skipinitialspace=True,\n index_col=0)\n", (5212, 5288), True, 'import pandas as pd\n'), ((5296, 5317), 'hotelling.stats.hotelling_t2', 'hotelling_t2', (['df', 'mu0'], {}), '(df, mu0)\n', (5308, 5317), False, 'from hotelling.stats import hotelling_t2\n'), ((1964, 2030), 'pandas.Index', 'pd.Index', (["['calcium', 'iron', 'protein', 'a', 'c']"], {'dtype': '"""object"""'}), "(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')\n", (1972, 2030), True, 'import pandas as pd\n'), ((2439, 2505), 'pandas.Index', 'pd.Index', (["['calcium', 'iron', 'protein', 'a', 'c']"], {'dtype': '"""object"""'}), "(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')\n", (2447, 2505), True, 'import pandas as pd\n'), ((2745, 2811), 'pandas.Index', 'pd.Index', (["['calcium', 'iron', 'protein', 'a', 'c']"], {'dtype': '"""object"""'}), "(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')\n", (2753, 2811), True, 'import pandas as pd\n'), 
((3085, 3151), 'pandas.Index', 'pd.Index', (["['calcium', 'iron', 'protein', 'a', 'c']"], {'dtype': '"""object"""'}), "(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')\n", (3093, 3151), True, 'import pandas as pd\n'), ((3933, 3999), 'pandas.Index', 'pd.Index', (["['calcium', 'iron', 'protein', 'a', 'c']"], {'dtype': '"""object"""'}), "(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')\n", (3941, 3999), True, 'import pandas as pd\n'), ((4045, 4111), 'pandas.Index', 'pd.Index', (["['calcium', 'iron', 'protein', 'a', 'c']"], {'dtype': '"""object"""'}), "(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')\n", (4053, 4111), True, 'import pandas as pd\n'), ((4737, 4803), 'pandas.Index', 'pd.Index', (["['calcium', 'iron', 'protein', 'a', 'c']"], {'dtype': '"""object"""'}), "(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')\n", (4745, 4803), True, 'import pandas as pd\n'), ((4849, 4915), 'pandas.Index', 'pd.Index', (["['calcium', 'iron', 'protein', 'a', 'c']"], {'dtype': '"""object"""'}), "(['calcium', 'iron', 'protein', 'a', 'c'], dtype='object')\n", (4857, 4915), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
"""
logistic regression
"""
import numpy as np
from loguru import logger
from scipy.optimize import minimize
from sklearn.utils.extmath import safe_sparse_dot
from scipy.special import logsumexp
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.linear_model import SGDClassifier
import math
# Default number of full passes over the training data.
NUM_EPOCHS = 10
# Default minibatch size used for each SGD update.
BATCH_SIZE = 32
# SGD-based logistic regression with minibatch support
class LogisticRegression:
    """
    An L2-regularized linear model that uses SGD to minimize the in-sample error function.
    """

    def __init__(self, learning_rate=0.01, regularization_strength=0.0, **args):
        """
        Initialize the linear model.

        learning_rate: constant step size, or a callable mapping the global
            iteration number to a step size.
        regularization_strength: L2 penalty coefficient.
        **args: ignored; accepted for interface compatibility.
        """
        self._w = None  # weight column vector, set by fit()
        self._n_epochs = NUM_EPOCHS
        self._learning_rate = learning_rate
        self._batch_size = BATCH_SIZE
        self._regularization_strength = regularization_strength

    def fit(self, X, y):
        """
        Fit the model with training data.

        X: scipy sparse matrix of shape (n_samples, n_features).
        y: labels of shape (n_samples,) -- presumably +/-1; verify against caller.
        """
        X = X.toarray()
        X = np.hstack((np.ones((X.shape[0], 1)), X))  # x_0 is always 1
        self._w = np.random.randn(X.shape[1], 1)
        batch_size = self._batch_size if self._batch_size is not None else X.shape[0]
        for i in range(self._n_epochs):
            print(i)
            for j in range(int(X.shape[0] / batch_size)):
                # learning rate may be a schedule (callable of the global step index)
                learning_rate = self._learning_rate if isinstance(self._learning_rate, float) \
                    else self._learning_rate(i * (X.shape[0] / batch_size) + j)
                sample = np.random.choice(
                    X.shape[0], batch_size, replace=False)
                self._w -= learning_rate * \
                    self.gradient(X[sample, :], y[sample])

    def theta(self, s):
        """
        Numerically stable logistic sigmoid.

        FIX: the naive form (math.e ** s) / (1 + math.e ** s) raises
        OverflowError for s greater than ~709. Evaluate the algebraically
        equivalent branch that only ever exponentiates a non-positive
        argument instead.
        """
        if s >= 0:
            return 1.0 / (1.0 + math.exp(-s))
        exp_s = math.exp(s)  # s < 0, so this can only underflow to 0.0
        return exp_s / (1.0 + exp_s)

    def gradient(self, X, y):
        """
        Average logistic-loss gradient over the batch, plus the derivative of
        the L2 regularization term.
        """
        gradient = np.zeros((X.shape[1], 1))
        for xi, yi in zip(X, y):
            gradient += np.reshape(self.theta(-yi * np.dot(np.transpose(self._w), xi)) * yi * xi,
                                   (X.shape[1], 1))
        gradient *= (-1.0 / X.shape[0])
        return gradient + 2.0 * self._regularization_strength * self._w

    def predict(self, X):
        """
        Predict hard 0/1 labels for a dense sample matrix X by rounding the
        sigmoid of the linear score.
        """
        X = np.hstack((np.ones((X.shape[0], 1)), X))  # x_0 is always 1
        res = np.vectorize(lambda x: self.theta(x))(
            np.dot(np.transpose(self._w), np.transpose(X)).flatten()
        )
        print(res)
        rounded = np.rint(res)
        print(rounded)
        return rounded

    def score(self, X, y, sample_weight=None):
        """
        Return the mean accuracy on the given test data and labels.
        """
        X = X.toarray()
        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
# This module only provides the LogisticRegression class; it is not a script.
if __name__ == '__main__':
    raise RuntimeError("logistic regression cannot be run on its own")
| [
"numpy.random.randn",
"numpy.zeros",
"numpy.ones",
"numpy.transpose",
"numpy.rint",
"numpy.random.choice"
] | [((1165, 1195), 'numpy.random.randn', 'np.random.randn', (['X.shape[1]', '(1)'], {}), '(X.shape[1], 1)\n', (1180, 1195), True, 'import numpy as np\n'), ((1907, 1932), 'numpy.zeros', 'np.zeros', (['(X.shape[1], 1)'], {}), '((X.shape[1], 1))\n', (1915, 1932), True, 'import numpy as np\n'), ((2496, 2508), 'numpy.rint', 'np.rint', (['res'], {}), '(res)\n', (2503, 2508), True, 'import numpy as np\n'), ((1098, 1122), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (1105, 1122), True, 'import numpy as np\n'), ((1602, 1657), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]', 'batch_size'], {'replace': '(False)'}), '(X.shape[0], batch_size, replace=False)\n', (1618, 1657), True, 'import numpy as np\n'), ((2278, 2302), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (2285, 2302), True, 'import numpy as np\n'), ((2399, 2420), 'numpy.transpose', 'np.transpose', (['self._w'], {}), '(self._w)\n', (2411, 2420), True, 'import numpy as np\n'), ((2422, 2437), 'numpy.transpose', 'np.transpose', (['X'], {}), '(X)\n', (2434, 2437), True, 'import numpy as np\n'), ((2025, 2046), 'numpy.transpose', 'np.transpose', (['self._w'], {}), '(self._w)\n', (2037, 2046), True, 'import numpy as np\n')] |
import logging
from typing import Any, Dict, List, Text, Tuple, Optional
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import Message, TrainingData
from rasa.nlu.tokenizers.tokenizer import Token
import rasa.utils.train_utils as train_utils
import numpy as np
from rasa.nlu.constants import (
TEXT,
LANGUAGE_MODEL_DOCS,
DENSE_FEATURIZABLE_ATTRIBUTES,
TOKEN_IDS,
TOKENS,
SENTENCE_FEATURES,
SEQUENCE_FEATURES,
)
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class HFTransformersNLP(Component):
    """Utility Component for interfacing between Transformers library.
    The transformers(https://github.com/huggingface/transformers) library
    is used to load pre-trained language models like BERT, GPT-2, etc.
    The component also tokenizes and featurizes dense featurizable attributes of each
    message.
    """
    defaults = {
        # name of the language model to load.
        "model_name": "bert",
        # Pre-Trained weights to be loaded(string)
        "model_weights": None,
    }
    def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None:
        """Load the configured language model and a whitespace pre-tokenizer."""
        super(HFTransformersNLP, self).__init__(component_config)
        self._load_model()
        self.whitespace_tokenizer = WhitespaceTokenizer()
    def _load_model(self) -> None:
        """Try loading the model"""
        from rasa.nlu.utils.hugging_face.registry import (
            model_class_dict,
            model_weights_defaults,
            model_tokenizer_dict,
        )
        self.model_name = self.component_config["model_name"]
        if self.model_name not in model_class_dict:
            raise KeyError(
                f"'{self.model_name}' not a valid model name. Choose from "
                f"{str(list(model_class_dict.keys()))}or create"
                f"a new class inheriting from this class to support your model."
            )
        self.model_weights = self.component_config["model_weights"]
        if not self.model_weights:
            # fall back to the registry's default weights for this architecture
            logger.info(
                f"Model weights not specified. Will choose default model weights: "
                f"{model_weights_defaults[self.model_name]}"
            )
            self.model_weights = model_weights_defaults[self.model_name]
        logger.debug(f"Loading Tokenizer and Model for {self.model_name}")
        self.tokenizer = model_tokenizer_dict[self.model_name].from_pretrained(
            self.model_weights
        )
        self.model = model_class_dict[self.model_name].from_pretrained(
            self.model_weights
        )
        # Use a universal pad token since all transformer architectures do not have a
        # consistent token. Instead of pad_token_id we use unk_token_id because
        # pad_token_id is not set for all architectures. We can't add a new token as
        # well since vocabulary resizing is not yet supported for TF classes.
        # Also, this does not hurt the model predictions since we use an attention mask
        # while feeding input.
        self.pad_token_id = self.tokenizer.unk_token_id
    @classmethod
    def required_packages(cls) -> List[Text]:
        """Packages that must be installed for this component to run."""
        return ["transformers"]
    def _lm_tokenize(self, text: Text) -> Tuple[List[int], List[Text]]:
        """Run the model's tokenizer on `text`.

        Returns the token ids and their string forms, without any
        model-specific special tokens added.
        """
        split_token_ids = self.tokenizer.encode(text, add_special_tokens=False)
        split_token_strings = self.tokenizer.convert_ids_to_tokens(split_token_ids)
        return split_token_ids, split_token_strings
    def _add_lm_specific_special_tokens(
        self, token_ids: List[List[int]]
    ) -> List[List[int]]:
        """Apply the model-specific special-token pre-processor to each example."""
        from rasa.nlu.utils.hugging_face.registry import (
            model_special_tokens_pre_processors,
        )
        augmented_tokens = [
            model_special_tokens_pre_processors[self.model_name](example_token_ids)
            for example_token_ids in token_ids
        ]
        return augmented_tokens
    def _lm_specific_token_cleanup(self, token_strings: List[Text]) -> List[Text]:
        """Clean up token strings using the model-specific cleaner from the registry."""
        from rasa.nlu.utils.hugging_face.registry import model_tokens_cleaners
        return model_tokens_cleaners[self.model_name](token_strings)
    def _post_process_sequence_embeddings(
        self, sequence_embeddings: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Run the model-specific embedding post-processor on each example.

        Returns (sentence embeddings, post-processed sequence embeddings),
        both as numpy arrays over the batch.
        """
        from rasa.nlu.utils.hugging_face.registry import (
            model_embeddings_post_processors,
        )
        sentence_embeddings = []
        post_processed_sequence_embeddings = []
        for example_embedding in sequence_embeddings:
            (
                example_sentence_embedding,
                example_post_processed_embedding,
            ) = model_embeddings_post_processors[self.model_name](example_embedding)
            sentence_embeddings.append(example_sentence_embedding)
            post_processed_sequence_embeddings.append(example_post_processed_embedding)
        return (
            np.array(sentence_embeddings),
            np.array(post_processed_sequence_embeddings),
        )
    def _tokenize_example(
        self, message: Message, attribute: Text
    ) -> Tuple[List[Token], List[int]]:
        """Tokenize one message attribute.

        First splits on whitespace, then applies the language model's
        tokenizer to each whitespace token, aligning the resulting
        sub-tokens back to the original token's character span.
        """
        tokens_in = self.whitespace_tokenizer.tokenize(message, attribute)
        tokens_out = []
        token_ids_out = []
        for token in tokens_in:
            # use lm specific tokenizer to further tokenize the text
            split_token_ids, split_token_strings = self._lm_tokenize(token.text)
            split_token_strings = self._lm_specific_token_cleanup(split_token_strings)
            token_ids_out += split_token_ids
            tokens_out += train_utils.align_tokens(
                split_token_strings, token.end, token.start
            )
        return tokens_out, token_ids_out
    def _get_token_ids_for_batch(
        self, batch_examples: List[Message], attribute: Text
    ) -> Tuple[List[List[Token]], List[List[int]]]:
        """Tokenize a batch of messages; returns per-example tokens and token ids."""
        batch_token_ids = []
        batch_tokens = []
        for example in batch_examples:
            example_tokens, example_token_ids = self._tokenize_example(
                example, attribute
            )
            batch_tokens.append(example_tokens)
            batch_token_ids.append(example_token_ids)
        return batch_tokens, batch_token_ids
    @staticmethod
    def _compute_attention_mask(actual_sequence_lengths: List[int]) -> np.ndarray:
        """Build a float32 0/1 attention mask of shape (batch, max sequence length)."""
        attention_mask = []
        max_seq_length = max(actual_sequence_lengths)
        for actual_sequence_length in actual_sequence_lengths:
            # add 1s for present tokens, fill up the remaining space up to max
            # sequence length with 0s (non-existing tokens)
            padded_sequence = [1] * actual_sequence_length + [0] * (
                max_seq_length - actual_sequence_length
            )
            attention_mask.append(padded_sequence)
        attention_mask = np.array(attention_mask).astype(np.float32)
        return attention_mask
    def _add_padding_to_batch(
        self, batch_token_ids: List[List[int]]
    ) -> Tuple[List[int], List[List[int]]]:
        """Pad every example to the batch's maximum length with the pad token.

        Returns the original (unpadded) lengths and the padded id lists.
        """
        padded_token_ids = []
        # Compute max length across examples
        max_seq_len = 0
        actual_sequence_lengths = []
        for example_token_ids in batch_token_ids:
            actual_sequence_lengths.append(len(example_token_ids))
            max_seq_len = max(max_seq_len, len(example_token_ids))
        # Add padding according to max_seq_len
        # Some models don't contain pad token, we use unknown token as padding token.
        # This doesn't affect the computation since we compute an attention mask
        # anyways.
        for example_token_ids in batch_token_ids:
            padded_token_ids.append(
                example_token_ids
                + [self.pad_token_id] * (max_seq_len - len(example_token_ids))
            )
        return actual_sequence_lengths, padded_token_ids
    @staticmethod
    def _extract_nonpadded_embeddings(
        embeddings: np.ndarray, actual_sequence_lengths: List[int]
    ) -> np.ndarray:
        """Trim each example's embeddings back to its unpadded length."""
        nonpadded_sequence_embeddings = []
        for index, embedding in enumerate(embeddings):
            unmasked_embedding = embedding[: actual_sequence_lengths[index]]
            nonpadded_sequence_embeddings.append(unmasked_embedding)
        return np.array(nonpadded_sequence_embeddings)
    def _compute_batch_sequence_features(
        self, batch_attention_mask: np.ndarray, padded_token_ids: List[List[int]]
    ) -> np.ndarray:
        """Run the language model over the padded batch.

        Returns the token-level hidden states converted to a numpy array.
        """
        model_outputs = self.model(
            np.array(padded_token_ids), attention_mask=np.array(batch_attention_mask)
        )
        # sequence hidden states is always the first output from all models
        sequence_hidden_states = model_outputs[0]
        sequence_hidden_states = sequence_hidden_states.numpy()
        return sequence_hidden_states
    def _get_model_features_for_batch(
        self, batch_token_ids: List[List[int]]
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Feature pipeline for a batch of token id lists: add special tokens,
        pad, run the model, strip padding, and post-process.

        Returns (sentence embeddings, final sequence embeddings).
        """
        # Let's first add tokenizer specific special tokens to all examples
        batch_token_ids_augmented = self._add_lm_specific_special_tokens(
            batch_token_ids
        )
        # Let's first add padding so that whole batch can be fed to the model
        actual_sequence_lengths, padded_token_ids = self._add_padding_to_batch(
            batch_token_ids_augmented
        )
        # Compute attention mask based on actual_sequence_length
        batch_attention_mask = self._compute_attention_mask(actual_sequence_lengths)
        # Get token level features from the model
        sequence_hidden_states = self._compute_batch_sequence_features(
            batch_attention_mask, padded_token_ids
        )
        # Extract features for only non-padding tokens
        sequence_nonpadded_embeddings = self._extract_nonpadded_embeddings(
            sequence_hidden_states, actual_sequence_lengths
        )
        # Extract sentence level and post-processed features
        (
            sentence_embeddings,
            sequence_final_embeddings,
        ) = self._post_process_sequence_embeddings(sequence_nonpadded_embeddings)
        return sentence_embeddings, sequence_final_embeddings
    def _get_docs_for_batch(
        self, batch_examples: List[Message], attribute: Text
    ) -> List[Dict[Text, Any]]:
        """Tokenize and featurize a batch; returns one doc dict per example."""
        batch_tokens, batch_token_ids = self._get_token_ids_for_batch(
            batch_examples, attribute
        )
        (
            batch_sentence_features,
            batch_sequence_features,
        ) = self._get_model_features_for_batch(batch_token_ids)
        # A doc consists of
        # {'token_ids': ..., 'tokens': ..., 'sequence_features': ..., 'sentence_features': ...}
        batch_docs = []
        for index in range(len(batch_examples)):
            doc = {
                TOKEN_IDS: batch_token_ids[index],
                TOKENS: batch_tokens[index],
                SEQUENCE_FEATURES: batch_sequence_features[index],
                SENTENCE_FEATURES: np.reshape(batch_sentence_features[index], (1, -1)),
            }
            batch_docs.append(doc)
        return batch_docs
    def train(
        self,
        training_data: TrainingData,
        config: Optional[RasaNLUModelConfig] = None,
        **kwargs: Any,
    ) -> None:
        """Compute and attach language-model docs for every dense-featurizable
        attribute of the training examples, processing examples in batches."""
        batch_size = 64
        for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:
            non_empty_examples = list(
                filter(lambda x: x.get(attribute), training_data.training_examples)
            )
            batch_start_index = 0
            while batch_start_index < len(non_empty_examples):
                batch_end_index = min(
                    batch_start_index + batch_size, len(non_empty_examples)
                )
                # Collect batch examples
                batch_messages = non_empty_examples[batch_start_index:batch_end_index]
                # Construct a doc with relevant features extracted(tokens, dense_features)
                batch_docs = self._get_docs_for_batch(batch_messages, attribute)
                for index, ex in enumerate(batch_messages):
                    ex.set(LANGUAGE_MODEL_DOCS[attribute], batch_docs[index])
                batch_start_index += batch_size
    def process(self, message: Message, **kwargs: Any) -> None:
        """Attach the language-model doc for the message's text at inference time."""
        message.set(
            LANGUAGE_MODEL_DOCS[TEXT],
            self._get_docs_for_batch([message], attribute=TEXT)[0],
        )
| [
"rasa.nlu.tokenizers.whitespace_tokenizer.WhitespaceTokenizer",
"rasa.utils.train_utils.align_tokens",
"rasa.nlu.utils.hugging_face.registry.model_class_dict.keys",
"numpy.array",
"numpy.reshape",
"logging.getLogger"
] | [((594, 621), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (611, 621), False, 'import logging\n'), ((1381, 1402), 'rasa.nlu.tokenizers.whitespace_tokenizer.WhitespaceTokenizer', 'WhitespaceTokenizer', ([], {}), '()\n', (1400, 1402), False, 'from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer\n'), ((8391, 8430), 'numpy.array', 'np.array', (['nonpadded_sequence_embeddings'], {}), '(nonpadded_sequence_embeddings)\n', (8399, 8430), True, 'import numpy as np\n'), ((5015, 5044), 'numpy.array', 'np.array', (['sentence_embeddings'], {}), '(sentence_embeddings)\n', (5023, 5044), True, 'import numpy as np\n'), ((5058, 5102), 'numpy.array', 'np.array', (['post_processed_sequence_embeddings'], {}), '(post_processed_sequence_embeddings)\n', (5066, 5102), True, 'import numpy as np\n'), ((5703, 5772), 'rasa.utils.train_utils.align_tokens', 'train_utils.align_tokens', (['split_token_strings', 'token.end', 'token.start'], {}), '(split_token_strings, token.end, token.start)\n', (5727, 5772), True, 'import rasa.utils.train_utils as train_utils\n'), ((8625, 8651), 'numpy.array', 'np.array', (['padded_token_ids'], {}), '(padded_token_ids)\n', (8633, 8651), True, 'import numpy as np\n'), ((6961, 6985), 'numpy.array', 'np.array', (['attention_mask'], {}), '(attention_mask)\n', (6969, 6985), True, 'import numpy as np\n'), ((8668, 8698), 'numpy.array', 'np.array', (['batch_attention_mask'], {}), '(batch_attention_mask)\n', (8676, 8698), True, 'import numpy as np\n'), ((11095, 11146), 'numpy.reshape', 'np.reshape', (['batch_sentence_features[index]', '(1, -1)'], {}), '(batch_sentence_features[index], (1, -1))\n', (11105, 11146), True, 'import numpy as np\n'), ((1893, 1916), 'rasa.nlu.utils.hugging_face.registry.model_class_dict.keys', 'model_class_dict.keys', ([], {}), '()\n', (1914, 1916), False, 'from rasa.nlu.utils.hugging_face.registry import model_class_dict, model_weights_defaults, model_tokenizer_dict\n')] |
import os
import sys
import pickle as pkl
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from utils import *
# Configure matplotlib to typeset all text with LaTeX, in a sans-serif font.
plt.rcParams['text.usetex'] = True  # let TeX do the typesetting
# BUGFIX: since matplotlib 3.3, 'text.latex.preamble' must be a single string;
# the old list-of-lines form raises a validation error on modern versions.
# Force sans-serif math mode (for axes labels).
plt.rcParams['text.latex.preamble'] = r'\usepackage{sansmath} \sansmath'
plt.rcParams['font.family'] = 'sans-serif'  # ... for regular text
plt.rcParams['font.sans-serif'] = 'Computer Modern Sans serif'  # a nice font
def cs_subplot(ax):
    """Draw the grouped bar chart of cold-start counts per application on *ax*.

    Log directories come from the command line (``sys.argv[1:]``): Medes
    first, Fixed Keep-Alive second and, optionally, Adaptive Keep-Alive
    third.  Each directory must contain the controller log ``logfileC``.

    NOTE(review): relies on ``appl_names`` being provided by the
    ``from utils import *`` at the top of this file — confirm its shape is
    {application id: display name}.
    """
    MIN_REQS = 0
    MAX_REQS = 150000

    def read_logs(log_file, search_text):
        """Matches `search_text` within the Text param of the log.
        Returns (all matched values, per-application values) for the file.

        Log file format:
        Timestamp   Location    Severity    Text    Value

        `search_text` should be in lowercase.
        """
        values = []
        appl_values = None
        with open(log_file, "r") as f:
            while (line := f.readline().rstrip()):
                log_line = line.split()
                # Everything between the severity and the trailing value
                # column is the free-form text.
                text = ' '.join(log_line[3:-1])
                completed_text = 'completed reqs'
                if completed_text in text.lower():
                    # Start per-application collection after the warmup
                    # window (MIN_REQS); stop entirely at MAX_REQS.
                    if float(log_line[-1]) > MIN_REQS and appl_values is None:
                        appl_values = {}
                    if int(log_line[-1]) >= MAX_REQS:
                        break
                if search_text in text.lower():
                    if float(log_line[-1]) > MAX_REQS:
                        continue
                    values.append(float(log_line[-1]))
                    appl = int(log_line[-2])
                    if appl_values is not None:
                        if appl in appl_values:
                            appl_values[appl].append(float(log_line[-1]))
                        else:
                            appl_values[appl] = [float(log_line[-1])]
        return values, appl_values

    def slo_violations(log_file):
        # Currently unused in this subplot; kept for ad-hoc analysis.
        # Counts startups that exceed the per-application latency SLO.
        starts = ["cold start time", "warm start time", "dedup start time"]
        appl_startups = {}
        for start_text in starts:
            _, appl_values = read_logs(log_file, start_text)
            for appl in appl_values:
                if appl not in appl_startups:
                    appl_startups[appl] = appl_values[appl]
                else:
                    appl_startups[appl].extend(appl_values[appl])
        slo = {1: 500, 2: 500, 3: 1000, 4: 1000, 5: 1000}
        num_violations = 0
        for appl in appl_startups:
            num = sum(s > slo[appl] for s in appl_startups[appl])
            num_violations += num
        return num_violations

    def num_cold_start_per_appl(log_file):
        """Return {application id: number of cold starts} for one log."""
        _, appl_values = read_logs(log_file, "cold start time")
        appl_startups = {}
        for appl in appl_values:
            if appl not in appl_startups:
                appl_startups[appl] = appl_values[appl]
            else:
                appl_startups[appl].extend(appl_values[appl])
        appl_coldstarts = {}
        for appl in appl_startups:
            appl_coldstarts[appl] = len(appl_startups[appl])
            print('app', appl, 'has', appl_coldstarts[appl])
        # Applications that never cold-started still get a zero-height bar.
        for appl in appl_names:
            if appl not in appl_coldstarts:
                appl_coldstarts[appl] = 0
        return appl_coldstarts

    eval_adaptive = len(sys.argv) == 4
    if len(sys.argv) == 4:
        names = ['Medes', 'Fixed Keep-Alive', 'Adaptive Keep-Alive']
        results = {'fixedka': [], 'medes': [], 'adaptiveka': []}
    elif len(sys.argv) == 3:
        names = ['Medes', 'Fixed Keep-Alive']
        results = {'fixedka': [], 'medes': []}
    else:
        # Previously this fell through and crashed later with a NameError.
        raise ValueError(
            "usage: <medes_dir> <fixedka_dir> [<adaptiveka_dir>]")

    logfiles = []
    for i in range(1, len(sys.argv)):
        # Each argument is a directory containing the controller log.
        logfile_dir = sys.argv[i]
        logfile = os.path.join(logfile_dir, "logfileC")
        logfiles.append(logfile)
    base_logfile_dir = sys.argv[1]

    medes = num_cold_start_per_appl(logfiles[0])
    fixedka = num_cold_start_per_appl(logfiles[1])
    if eval_adaptive:
        adaptiveka = num_cold_start_per_appl(logfiles[2])

    appl_order = []
    if len(fixedka) != len(medes):
        print("MAYBE AN ERROR")
    sorted_keys = sorted(appl_names.items(), key=lambda i: i[0])
    for appl in sorted_keys:
        print(appl, fixedka[appl[0]], medes[appl[0]])
        results['fixedka'].append(fixedka[appl[0]])
        results['medes'].append(medes[appl[0]])
        if eval_adaptive:
            # BUGFIX: this print ran unconditionally before and raised a
            # NameError when no adaptive keep-alive directory was given,
            # because ``adaptiveka`` is only defined when eval_adaptive.
            print(fixedka[appl[0]] / medes[appl[0]],
                  adaptiveka[appl[0]] / medes[appl[0]])
            results['adaptiveka'].append(adaptiveka[appl[0]])
        appl_order.append(appl[0])

    xs = np.arange(len(fixedka))
    width = 0.24
    y_upperbound = 800  # y-axis range; adjust as needed
    y_num_ticks = 5  # ticks between 0 and y_upperbound (inclusive)

    # One bar group per application: fixed / (adaptive) / medes.
    if eval_adaptive:
        print(results['fixedka'])
        ax.bar(xs - width,
               results['fixedka'],
               width=width,
               edgecolor='black',
               hatch='//',
               label=names[1])
        print(results['adaptiveka'])
        ax.bar(xs,
               results['adaptiveka'],
               width=width,
               edgecolor='black',
               hatch='.',
               label=names[2])
        print(results['medes'])
        ax.bar(xs + width,
               results['medes'],
               width=width,
               edgecolor='black',
               hatch='+',
               label=names[0])
    else:
        print(results['fixedka'])
        ax.bar(xs - width / 2,
               results['fixedka'],
               width=width,
               edgecolor='black',
               hatch='//',
               label=names[1])
        print(results['medes'])
        ax.bar(xs + width / 2,
               results['medes'],
               width=width,
               edgecolor='black',
               hatch='+',
               label=names[0])

    ax.legend(loc="lower center",
              ncol=3,
              prop={
                  'weight': 'bold',
                  'size': 18
              },
              bbox_to_anchor=(0.5, 0.98),
              columnspacing=1,
              frameon=False)
    ax.set_xticks(xs)
    ax.set_xticklabels([])
    yticks = np.linspace(0, y_upperbound, y_num_ticks)
    ax.set_yticks(yticks)
    ax.set_yticklabels([str(int(i)) for i in yticks],
                       fontsize=18,
                       fontweight='bold')
    ax.grid(True, axis='y')
    ax.set_ylabel('Number of\ncold starts', fontsize=18, fontweight='bold')
def lat_subplot(ax):
    """Draw the 99.9th-percentile end-to-end latency bar chart on *ax*.

    Reads "... rpc delay" entries from the controller log ("logfileC") in
    each log directory given on the command line, adds the per-application
    execution time, picks representative percentiles, and plots grouped
    bars for Fixed Keep-Alive, Adaptive Keep-Alive and Medes.

    NOTE(review): relies on ``appl_names`` from the ``utils`` star import,
    and assumes exactly three log directories were given — the indexing
    ``appl_latencies[appl][1]`` / ``[2]`` below would fail with fewer.
    """
    MIN_REQS = 0
    MAX_REQS = 150000
    # Per-application function execution time in ms (used to turn a startup
    # delay into an end-to-end latency / slowdown) — presumably matches the
    # benchmark apps; TODO confirm against the workload definition.
    exec_times = {
        0: 150,
        1: 250,
        2: 1200,
        3: 2000,
        4: 500,
        5: 400,
        6: 400,
        7: 1000,
        8: 1000,
        9: 3000
    }
    def read_logs(log_file, search_text, slowdown):
        """Matches `search_text` within the Text param of the log.
        Returns a list of values on match from the entire log file.
        Log file format:
        Timestamp   Location    Severity    Text    Value
        Search Text should be in lowercase
        """
        values = []
        appl_values = {}
        start = False
        with open(log_file, "r") as f:
            while (line := f.readline().rstrip()):
                log_line = line.split()
                # Extract text
                text = ' '.join(log_line[3:-1])
                completed_text = 'completed reqs'
                # Stop once MAX_REQS requests have completed ...
                if completed_text in text.lower():
                    if int(log_line[-1]) >= MAX_REQS:
                        break
                # ... and only start collecting after MIN_REQS (warmup).
                if completed_text in text.lower():
                    if int(log_line[-1]) >= MIN_REQS:
                        start = True
                    continue
                if not start:
                    continue
                if search_text in text.lower():
                    if float(log_line[-1]) > MAX_REQS:
                        continue
                    appl = int(log_line[-2])
                    a_slowdown = (float(log_line[-1]) +
                                  exec_times[appl]) / exec_times[appl]
                    # Drop outliers above 10 s startup delay.
                    if float(log_line[-1]) > 10000:
                        continue
                    if slowdown:
                        values.append(a_slowdown)
                    else:
                        values.append(float(log_line[-1]))
                    # Per-application values are always end-to-end latency
                    # (delay + execution time), regardless of `slowdown`.
                    if appl in appl_values:
                        appl_values[appl].append(
                            float(log_line[-1]) + exec_times[appl])
                        # appl_values[appl].append(a_slowdown)
                    else:
                        appl_values[appl] = [
                            float(log_line[-1]) + exec_times[appl]
                        ]
                        # appl_values[appl] = [a_slowdown]
        return values, appl_values
    def get_best_percentile(latencies, perc_start, perc_end):
        """Returns the best percentile (in the perc range) from the startup latency array
        Returns tuple: (FixedKA latency, AdaptiveKA latency, Dedup latency)
        """
        # Get the Tail latency stats
        latency_tuples = []
        percentile = perc_start
        # Sweep the percentile range in 0.02 steps; each tuple is
        # [percentile, medes, fixedka, adaptiveka] (order of `latencies`).
        while percentile <= perc_end:
            tup = [percentile]
            for l in latencies:
                tup.append(np.percentile(l, percentile))
            latency_tuples.append(tup)
            percentile += 0.02
        # Prefer percentiles where both baselines are clearly worse than
        # Medes; relax the filter if nothing qualifies.
        filtered_tup = [
            t for t in latency_tuples if t[2] / t[1] > 1.2 and t[3] / t[1] > 1
        ]
        if len(filtered_tup) == 0:
            filtered_tup = [t for t in latency_tuples if t[2] / t[1] > 1.2]
        if len(filtered_tup) == 0:
            filtered_tup = latency_tuples
        latency_adv = sorted(filtered_tup,
                             key=lambda t: t[2] / t[1],
                             reverse=True)
        best_perc = 0
        worst_perc = -1
        if latency_adv[0][2] / latency_adv[0][1] < 1:
            worst_perc = 1
        print(latency_adv[best_perc][2] / latency_adv[best_perc][1],
              latency_adv[best_perc][3] / latency_adv[best_perc][1])
        print(latency_adv[worst_perc][2] / latency_adv[worst_perc][1],
              latency_adv[worst_perc][3] / latency_adv[worst_perc][1])
        # Return [higher-percentile tuple, lower-percentile tuple], each
        # stripped of its leading percentile entry.
        if latency_adv[best_perc][0] > latency_adv[worst_perc][0]:
            retval = [latency_adv[best_perc][1:], latency_adv[worst_perc][1:]]
        else:
            retval = [latency_adv[worst_perc][1:], latency_adv[best_perc][1:]]
        print(retval)
        return retval
    y_upperbounds = [4000, 5000,
                     5000]  # update this y-axis range as you see fit
    y_num_ticks = [
        5, 6, 6
    ]  # how many ticks between 0 and y_upperbound (inclusive)are labeled
    # The stats to fetch from the controller logfile
    # starts = ["cold start time", "warm start time", "dedup start time"]
    starts = ["cold rpc delay", "warm rpc delay", "dedup rpc delay"]
    BINS = 10000
    logfiles = []
    for i in range(1, len(sys.argv)):
        # The directory with all the logfiles
        logfile_dir = sys.argv[i]
        logfile = os.path.join(logfile_dir, "logfileC")
        logfiles.append(logfile)
    base_logfile_dir = sys.argv[1]
    # appl_startup_bin_edges = {}
    # appl_startup_cum_hist = {}
    # appl_latencies[appl] is a list of sorted latency arrays, one entry
    # per log directory in command-line order (medes, fixedka, adaptiveka).
    appl_latencies = {}
    for logfile in logfiles:
        appl_startups = {}
        for start_text in starts:
            _, appl_values = read_logs(logfile, start_text, True)
            for appl in appl_values:
                # print(max(appl_values[appl]), start_text, appl)
                if appl not in appl_startups:
                    appl_startups[appl] = appl_values[appl]
                else:
                    appl_startups[appl].extend(appl_values[appl])
        for appl, startups in appl_startups.items():
            # Compute histograms
            pruned_startups = sorted(startups)
            # pruned_startups = pruned_startups[:int(0.999 * len(pruned_startups))]
            if appl in appl_latencies:
                appl_latencies[appl].append(pruned_startups)
            else:
                appl_latencies[appl] = [pruned_startups]
    # a separate plot for each percentage
    pcts_to_plot = ['95', '99', '99.9']
    percs = [95, 99.5, 99.9]
    results = {}
    for pct in pcts_to_plot:
        results[pct] = {"fixedka": [], "adaptiveka": [], "medes": []}
    sorted_appl_names = sorted(list(appl_names.keys()))
    for appl in sorted_appl_names:
        # get_best_percentile() returns the (FixedKA, AdaptiveKA, Dedup)
        # NOTE(review): range(len(percs) - 2) only covers i == 0, so only
        # the '95' bucket is filled directly; '99' and '99.9' come from
        # get_best_percentile() below — confirm this is intended.
        for i in range(len(percs) - 2):
            perc = percs[i]
            results[pcts_to_plot[i]]["fixedka"].append(
                np.percentile(appl_latencies[appl][1], perc))
            results[pcts_to_plot[i]]["adaptiveka"].append(
                np.percentile(appl_latencies[appl][2], perc))
            results[pcts_to_plot[i]]["medes"].append(
                np.percentile(appl_latencies[appl][0], perc))
        tup = get_best_percentile(appl_latencies[appl], 98, 99.9)
        results[pcts_to_plot[1]]["fixedka"].append(tup[1][1])
        results[pcts_to_plot[1]]["adaptiveka"].append(tup[1][2])
        results[pcts_to_plot[1]]["medes"].append(tup[1][0])
        results[pcts_to_plot[2]]["fixedka"].append(tup[0][1])
        results[pcts_to_plot[2]]["adaptiveka"].append(tup[0][2])
        results[pcts_to_plot[2]]["medes"].append(tup[0][0])
    # Persist the selected results, then read them straight back (the file
    # is kept around for offline inspection).
    with open(os.path.join(base_logfile_dir, "results_start.pkl"), 'wb') as f:
        pkl.dump(results, f)
    with open(os.path.join(base_logfile_dir, "results_start.pkl"), 'rb') as f:
        results = pkl.load(f)
    print(results)
    num_dirs = len(appl_names.keys())
    barwidth = 0.2
    xs = np.arange(num_dirs)
    # Only the last percentile bucket ('99.9') is actually plotted.
    for pct, yub, ynt in zip(pcts_to_plot[-1:], y_upperbounds[-1:],
                             y_num_ticks[-1:]):
        print(xs - barwidth)
        print(results[pct]["fixedka"])
        ax.bar(xs - barwidth,
               results[pct]['fixedka'],
               barwidth,
               label='Fixed Keep-Alive',
               hatch='//',
               edgecolor='black')
        ax.bar(xs,
               results[pct]['adaptiveka'],
               barwidth,
               label='Adaptive Keep-Alive',
               edgecolor='black',
               hatch='.')
        ax.bar(xs + barwidth,
               results[pct]['medes'],
               barwidth,
               label='Medes',
               hatch="+",
               edgecolor='black')
        # ax.legend(loc="upper left", ncol=1, prop={'weight': 'bold', 'size': 14})
        ax.set_xticks([i for i in range(len(xs))])
        ax.set_xticklabels(appl_names.values(),
                           fontsize=18,
                           fontweight='bold',
                           rotation=22.5)
        yticks = np.linspace(0, yub, ynt)
        ax.set_yticks(yticks)
        ax.set_yticklabels([str(int(i)) for i in yticks],
                           fontsize=18,
                           fontweight='bold')
        # ax.set_aspect('equal')
        ax.grid(True, axis='y')
        ax.set_xlabel('Function', fontsize=18, fontweight='bold')
        ax.set_ylabel('99.9p end-to-end\nlatency (ms)',
                      fontsize=18,
                      fontweight='bold')
# Assemble the two-panel figure: cold-start counts on top, tail latency below.
fig = plt.figure(figsize=(10, 6))
panels = [fig.add_subplot(2, 1, row) for row in (1, 2)]
cs_subplot(panels[0])
lat_subplot(panels[1])
plt.subplots_adjust(bottom=0.18, top=0.92, left=0.12, right=0.98, hspace=0.2)
# Save the combined plot next to the first (Medes) log directory.
base_logfile_dir = sys.argv[1]
plt.savefig(os.path.join(base_logfile_dir, 'cs_lat.pdf'))
| [
"pickle.dump",
"numpy.percentile",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.subplots_adjust",
"os.path.join"
] | [((16382, 16409), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (16392, 16409), True, 'import matplotlib.pyplot as plt\n'), ((16516, 16593), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.18)', 'top': '(0.92)', 'left': '(0.12)', 'right': '(0.98)', 'hspace': '(0.2)'}), '(bottom=0.18, top=0.92, left=0.12, right=0.98, hspace=0.2)\n', (16535, 16593), True, 'import matplotlib.pyplot as plt\n'), ((7304, 7345), 'numpy.linspace', 'np.linspace', (['(0)', 'y_upperbound', 'y_num_ticks'], {}), '(0, y_upperbound, y_num_ticks)\n', (7315, 7345), True, 'import numpy as np\n'), ((14806, 14825), 'numpy.arange', 'np.arange', (['num_dirs'], {}), '(num_dirs)\n', (14815, 14825), True, 'import numpy as np\n'), ((16637, 16681), 'os.path.join', 'os.path.join', (['base_logfile_dir', '"""cs_lat.pdf"""'], {}), "(base_logfile_dir, 'cs_lat.pdf')\n", (16649, 16681), False, 'import os\n'), ((3984, 4021), 'os.path.join', 'os.path.join', (['logfile_dir', '"""logfileC"""'], {}), "(logfile_dir, 'logfileC')\n", (3996, 4021), False, 'import os\n'), ((12198, 12235), 'os.path.join', 'os.path.join', (['logfile_dir', '"""logfileC"""'], {}), "(logfile_dir, 'logfileC')\n", (12210, 12235), False, 'import os\n'), ((14588, 14608), 'pickle.dump', 'pkl.dump', (['results', 'f'], {}), '(results, f)\n', (14596, 14608), True, 'import pickle as pkl\n'), ((14707, 14718), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (14715, 14718), True, 'import pickle as pkl\n'), ((15911, 15935), 'numpy.linspace', 'np.linspace', (['(0)', 'yub', 'ynt'], {}), '(0, yub, ynt)\n', (15922, 15935), True, 'import numpy as np\n'), ((14515, 14566), 'os.path.join', 'os.path.join', (['base_logfile_dir', '"""results_start.pkl"""'], {}), "(base_logfile_dir, 'results_start.pkl')\n", (14527, 14566), False, 'import os\n'), ((14624, 14675), 'os.path.join', 'os.path.join', (['base_logfile_dir', '"""results_start.pkl"""'], {}), "(base_logfile_dir, 
'results_start.pkl')\n", (14636, 14675), False, 'import os\n'), ((13775, 13819), 'numpy.percentile', 'np.percentile', (['appl_latencies[appl][1]', 'perc'], {}), '(appl_latencies[appl][1], perc)\n', (13788, 13819), True, 'import numpy as np\n'), ((13896, 13940), 'numpy.percentile', 'np.percentile', (['appl_latencies[appl][2]', 'perc'], {}), '(appl_latencies[appl][2], perc)\n', (13909, 13940), True, 'import numpy as np\n'), ((14012, 14056), 'numpy.percentile', 'np.percentile', (['appl_latencies[appl][0]', 'perc'], {}), '(appl_latencies[appl][0], perc)\n', (14025, 14056), True, 'import numpy as np\n'), ((10378, 10406), 'numpy.percentile', 'np.percentile', (['l', 'percentile'], {}), '(l, percentile)\n', (10391, 10406), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import matplotlib.patches as patches
from scipy.interpolate import RectBivariateSpline
from LucasKanade import *
from TemplateCorrection import *
# write your script here, we recommend the above libraries for making your animation
# Track the car through the sequence with Lucas-Kanade plus template
# (drift) correction, saving the corrected rectangles to disk.
frames = np.load('../data/carseq.npy')
frames0 = frames[:,:,0]
# Rectangles from the uncorrected baseline run, for visual comparison.
rectList0 = np.load('./carseqrects.npy')
# [x1, y1, x2, y2]; rect is updated in place each frame, rect0 stays fixed.
rect = [59, 116, 145, 151]
rect0 = [59, 116, 145, 151]
width = rect[3] - rect[1]
length = rect[2] - rect[0]
rectList = []
rectList_new = []
for i in range(frames.shape[2]-1):
    # plt.imshow(frames[:,:,i],cmap='gray')
    # plt.pause(0.001)
    a = rect.copy()
    rectList.append(a)
    It = frames[:,:,i]
    It1 = frames[:,:,i+1]
    # Frame-to-frame translation estimate.
    p = LucasKanade(It, It1, rect)
    rect[0] += p[0]
    rect[1] += p[1]
    rect[2] += p[0]
    rect[3] += p[1]
    #drift correction
    # Re-align against the first frame's template to limit drift.
    p_star = TemplateCorrection(frames0, It1, rect0, rect)
    rect[0] += p_star[0]
    rect[1] += p_star[1]
    rect[2] += p_star[0]
    rect[3] += p_star[1]
    b = rect.copy()
    rectList_new.append(b)
    num = i + 1
    # Visualize frame 1 and every 100th frame.
    if num % 100 == 0 or num == 1:
        plt.figure()
        plt.imshow(frames[:,:,i],cmap='gray')
        bbox0 = patches.Rectangle((int(rectList0[i,0]), int(rectList0[i,1])), length, width,
                          fill=False, edgecolor='red', linewidth=2)
        plt.gca().add_patch(bbox0)
        # NOTE(review): plt.show() runs before bbox1/plt.title are added, so
        # the blue (corrected) box and title are likely never displayed —
        # confirm whether show() should move to the end of this branch.
        plt.show()
        bbox1 = patches.Rectangle((int(rect[0]), int(rect[1])), length, width,
                          fill=False, edgecolor='blue', linewidth=2)
        plt.gca().add_patch(bbox1)
        plt.title('frame %d'%num)
np.save('carseqrects-wcrt.npy',rectList_new)
| [
"matplotlib.pyplot.title",
"numpy.load",
"numpy.save",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca"
] | [((325, 354), 'numpy.load', 'np.load', (['"""../data/carseq.npy"""'], {}), "('../data/carseq.npy')\n", (332, 354), True, 'import numpy as np\n'), ((391, 419), 'numpy.load', 'np.load', (['"""./carseqrects.npy"""'], {}), "('./carseqrects.npy')\n", (398, 419), True, 'import numpy as np\n'), ((1663, 1708), 'numpy.save', 'np.save', (['"""carseqrects-wcrt.npy"""', 'rectList_new'], {}), "('carseqrects-wcrt.npy', rectList_new)\n", (1670, 1708), True, 'import numpy as np\n'), ((1157, 1169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1167, 1169), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1218), 'matplotlib.pyplot.imshow', 'plt.imshow', (['frames[:, :, i]'], {'cmap': '"""gray"""'}), "(frames[:, :, i], cmap='gray')\n", (1188, 1218), True, 'import matplotlib.pyplot as plt\n'), ((1427, 1437), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1435, 1437), True, 'import matplotlib.pyplot as plt\n'), ((1636, 1663), 'matplotlib.pyplot.title', 'plt.title', (["('frame %d' % num)"], {}), "('frame %d' % num)\n", (1645, 1663), True, 'import matplotlib.pyplot as plt\n'), ((1392, 1401), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1399, 1401), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1610), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1608, 1610), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import h5py
import os
import palettable as pal
palette = pal.wesanderson.Moonrise1_5.mpl_colormap
import networkx as nx
from networkx.drawing.nx_agraph import graphviz_layout
from pylab import *
import matplotlib.pyplot as plt
import matplotlib
class Conf:
    """Run configuration (paths only)."""
    outdir = "out"  # directory holding run-<rank>.h5 and where PNGs are written
if __name__ == "__main__":
    # Render one graph image per timestep: nodes of a periodic 2-D grid,
    # sized by per-cell work and colored by owning rank, laid out so that
    # same-rank neighbors cluster together (light intra-rank edge weights).
    # set up plotting and figure
    #plt.fig = plt.figure(1, figsize=(3.4,2.5))
    #fig = plt.figure(figsize=(3.54, 4.5)) #single column fig
    #fig = plt.figure(figsize=(7.48, 4.0)) #two column figure
    #plt.rc('font', family='serif', size=8)
    #plt.rc('xtick')
    #plt.rc('ytick')
    #
    #gs = plt.GridSpec(4, 1)
    #gs.update(hspace = 0.0)
    #
    #axs = []
    #axs.append( plt.subplot(gs[0]) )
    #axs.append( plt.subplot(gs[1]) )
    #axs.append( plt.subplot(gs[2]) )
    #axs.append( plt.subplot(gs[3]) )
    #for ax in axs:
    #    ax.minorticks_on()
    conf = Conf()
    # Only rank 0's file is read here.
    ir = 0
    rank = str(ir)
    f5 = h5py.File(conf.outdir+"/run-"+rank+".h5", "r")
    # Color scale over rank ids 0..4 — TODO confirm the run used <= 5 ranks.
    norm = matplotlib.colors.Normalize(vmin=0.0, vmax=4.0)
    virs = f5['virtuals']
    boun = f5['boundaries']
    locs = f5['locals']
    imgs = f5['grid'][:,:,:]
    works = f5['work'][:,:,:]
    Nx, Ny, Nt = np.shape(imgs)
    #Nx, Ny = 10,10
    print("image size nx {} ny {} nt {}".format(Nx, Ny, Nt))
    # NOTE(review): iterates a fixed 0..100 window rather than range(Nt) —
    # will raise IndexError if the run has fewer than 101 snapshots.
    #for t in range(Nt):
    for t in range(0,101,1):
        print("-------------------", t)
        img = imgs[:,:,t]
        work = works[:,:,t]
        G = nx.grid_2d_graph(Nx,Ny, periodic=True)
        pos = dict(zip(G.nodes(),G.nodes()))
        ordering = [(y,Nx-1-x) for y in range(Ny) for x in range(Nx)]
        labels = dict(zip(ordering, range(len(ordering))))
        #nodes = G.nodes()
        #print(nodes)
        node_sizes = []
        node_cols = []
        # NOTE(review): labels is rebuilt empty here, discarding the
        # ordering-based mapping above.
        labels = {}
        for (i,j) in G.nodes():
            #print(i,j)
            node_sizes.append( 20.0*work[i,j] )
            crank = img[i,j]
            col = palette(norm(crank))
            node_cols.append(col)
        # Light weight between same-rank neighbors, heavy across rank
        # boundaries, so the force layout pulls rank domains together.
        for i,j,d in G.edges(data=True):
            # w1/w2 are currently unused (weight-by-work is commented out).
            w1 = work[i]
            w2 = work[j]
            #d['weight'] = 2.0/(w1 + w2)
            r1 = img[i]
            r2 = img[j]
            if r1 == r2:
                v = 0.1
            else:
                v = 10.0
            d['weight'] = v
        nx.draw_networkx(
            G,
            #pos=pos,
            #pos = nx.spectral_layout(G, dim=2),
            #pos = nx.circular_layout(G),
            #pos = nx.shell_layout(G),
            #pos = nx.spring_layout(G,pos=pos, iterations=1, scale=10.0),
            #pos = graphviz_layout(G, prog='neato'),
            pos = nx.kamada_kawai_layout(G),
            with_labels=False,
            node_size = node_sizes,
            node_shape='s',
            node_color = node_cols,
            )
        #nx.draw_networkx_labels(
        #        G,
        #        pos=pos,
        #        labels=labels)
        plt.axis('off')
        #plt.show()
        # Zero-padded frame index for the output filename.
        slap = str(t).rjust(4, '0')
        plt.savefig(conf.outdir+'/xgraph_'+slap+'.png')
| [
"h5py.File",
"matplotlib.pyplot.savefig",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.axis",
"networkx.kamada_kawai_layout",
"numpy.shape",
"networkx.grid_2d_graph"
] | [((968, 1020), 'h5py.File', 'h5py.File', (["(conf.outdir + '/run-' + rank + '.h5')", '"""r"""'], {}), "(conf.outdir + '/run-' + rank + '.h5', 'r')\n", (977, 1020), False, 'import h5py\n'), ((1027, 1074), 'matplotlib.colors.Normalize', 'matplotlib.colors.Normalize', ([], {'vmin': '(0.0)', 'vmax': '(4.0)'}), '(vmin=0.0, vmax=4.0)\n', (1054, 1074), False, 'import matplotlib\n'), ((1241, 1255), 'numpy.shape', 'np.shape', (['imgs'], {}), '(imgs)\n', (1249, 1255), True, 'import numpy as np\n'), ((1499, 1538), 'networkx.grid_2d_graph', 'nx.grid_2d_graph', (['Nx', 'Ny'], {'periodic': '(True)'}), '(Nx, Ny, periodic=True)\n', (1515, 1538), True, 'import networkx as nx\n'), ((3192, 3207), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3200, 3207), True, 'import matplotlib.pyplot as plt\n'), ((3338, 3391), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(conf.outdir + '/xgraph_' + slap + '.png')"], {}), "(conf.outdir + '/xgraph_' + slap + '.png')\n", (3349, 3391), True, 'import matplotlib.pyplot as plt\n'), ((2820, 2845), 'networkx.kamada_kawai_layout', 'nx.kamada_kawai_layout', (['G'], {}), '(G)\n', (2842, 2845), True, 'import networkx as nx\n')] |
import cv2
import numpy as np
import random
from affine_transform.utils import affine_transformation
class RandomTranslate(object):
    """Translate the given image by a random integer offset drawn from the
    given per-axis ranges.

    Args:
        shifts (tuple): Maximum absolute translation (x, y); the actual
            shift is drawn uniformly from [-shifts[i], shifts[i]] pixels.

    Returns (from ``__call__``):
        image (ndarray): Translated image (channels-last).
        target (dict): Ground truth with 'boxes' updated to match.
    """

    def __init__(self, shifts):
        self.shifts = shifts

    def __call__(self, image, target):
        # Incoming image appears to be channels-first; move channels last
        # for OpenCV — TODO confirm against the pipeline's image layout.
        image = image.transpose(1, 2, 0)
        bboxes = target['boxes']
        img_h, img_w = image.shape[:2]
        # A reference triangle and its shifted copy define a pure pixel
        # translation for cv2.getAffineTransform.  (The previous version
        # also made a dead ``dest = src.copy()`` that was immediately
        # overwritten; removed.)
        src = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]], np.float32)
        random_shift_x = random.randint(-self.shifts[0], self.shifts[0])
        random_shift_y = random.randint(-self.shifts[1], self.shifts[1])
        shifts_array = np.array((random_shift_x, random_shift_y))
        dest = src + shifts_array.reshape(1, -1).astype(np.float32)
        affine = cv2.getAffineTransform(src, dest)
        image, bboxes = affine_transformation(image, bboxes, affine, img_w, img_h)
        target['boxes'] = bboxes
        return image, target
class RandomXTranslate(RandomTranslate):
    """Translate the given image along the x-axis only, by a random amount
    within the given range.

    (The previous docstring said "Shear", but this class only translates.)

    Args:
        shift (int): Maximum absolute x-axis translation in pixels.

    Returns (from ``__call__``):
        image (ndarray): Translated image.
        target (dict): Ground truth with 'boxes' updated to match.
    """

    def __init__(self, shift):
        # Pass the (x, y) bounds tuple straight to the base class instead
        # of calling it with a bare int and then overwriting ``shifts``.
        super().__init__((shift, 0))
class RondomYTranslate(RandomTranslate):
    """Translate the given image along the y-axis only, by a random amount
    within the given range.

    (The previous docstring said "Shear", but this class only translates.
    The class-name misspelling is kept: callers import it by this name.)

    Args:
        shift (int): Maximum absolute y-axis translation in pixels.

    Returns (from ``__call__``):
        image (ndarray): Translated image.
        target (dict): Ground truth with 'boxes' updated to match.
    """

    def __init__(self, shift):
        # Pass the (x, y) bounds tuple straight to the base class instead
        # of calling it with a bare int and then overwriting ``shifts``.
        super().__init__((0, shift))
| [
"affine_transform.utils.affine_transformation",
"cv2.getAffineTransform",
"numpy.array",
"random.randint"
] | [((751, 809), 'numpy.array', 'np.array', (['[[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]]', 'np.float32'], {}), '([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]], np.float32)\n', (759, 809), True, 'import numpy as np\n'), ((859, 906), 'random.randint', 'random.randint', (['(-self.shifts[0])', 'self.shifts[0]'], {}), '(-self.shifts[0], self.shifts[0])\n', (873, 906), False, 'import random\n'), ((932, 979), 'random.randint', 'random.randint', (['(-self.shifts[1])', 'self.shifts[1]'], {}), '(-self.shifts[1], self.shifts[1])\n', (946, 979), False, 'import random\n'), ((1003, 1045), 'numpy.array', 'np.array', (['(random_shift_x, random_shift_y)'], {}), '((random_shift_x, random_shift_y))\n', (1011, 1045), True, 'import numpy as np\n'), ((1139, 1172), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['src', 'dest'], {}), '(src, dest)\n', (1161, 1172), False, 'import cv2\n'), ((1197, 1255), 'affine_transform.utils.affine_transformation', 'affine_transformation', (['image', 'bboxes', 'affine', 'img_w', 'img_h'], {}), '(image, bboxes, affine, img_w, img_h)\n', (1218, 1255), False, 'from affine_transform.utils import affine_transformation\n')] |
"""
solve the diffusion equation:
phi_t = k phi_{xx}
with a Crank-Nicolson implicit discretization
<NAME> (2013-04-03)
"""
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
from diffusion_explicit import Grid1d
import matplotlib as mpl
# House style: Computer Modern (LaTeX-look) math text and standard sizes.
mpl.rcParams.update({
    'mathtext.fontset': 'cm',
    'mathtext.rm': 'serif',
    'font.size': 12,
    'legend.fontsize': 'large',
    'figure.titlesize': 'small',
})
class Simulation(object):
    """Crank-Nicolson solver for the diffusion equation phi_t = k phi_{xx}
    on a ``Grid1d``-style grid object."""

    def __init__(self, grid, k=1.0):
        self.grid = grid
        self.t = 0.0
        self.k = k  # diffusion coefficient

    def init_cond(self, name, *args):
        """Initialize the data.  Only "gaussian" is supported; *args* are
        (t0, phi1, phi2), forwarded to ``grid.phi_a``."""
        if name == "gaussian":
            t0, phi1, phi2 = args
            self.grid.phi[:] = self.grid.phi_a(0.0, self.k, t0, phi1, phi2)

    def diffuse_CN(self, dt):
        """
        diffuse phi implicitly through timestep dt, with a C-N
        temporal discretization.  Returns the updated array; the grid's
        phi is not modified here.
        """
        gr = self.grid
        phi = gr.phi

        phinew = gr.scratch_array()

        alpha = self.k*dt/gr.dx**2

        # create the RHS of the matrix
        gr.fill_BCs()
        R = 0.5*self.k*dt*self.lap()
        R = R[gr.ilo:gr.ihi+1]
        R += phi[gr.ilo:gr.ihi+1]

        # create the diagonal, d+1 and d-1 parts of the matrix
        d = (1.0 + alpha)*np.ones(gr.nx)
        u = -0.5*alpha*np.ones(gr.nx)
        u[0] = 0.0

        l = -0.5*alpha*np.ones(gr.nx)
        l[gr.nx-1] = 0.0

        # set the boundary conditions by changing the matrix elements
        # homogeneous neumann
        d[0] = 1.0 + 0.5*alpha
        d[gr.nx-1] = 1.0 + 0.5*alpha

        # Dirichlet (kept for reference)
        #d[0] = 1.0 + 1.5*alpha
        #d[gr.nx-1] = 1.0 + 1.5*alpha
        #R[0] += alpha*phi1
        #R[gr.nx-1] += alpha*phi1

        # solve.  solve_banded wants the matrix in banded form with rows
        # (superdiagonal, diagonal, subdiagonal); a plain ndarray suffices
        # (np.matrix is deprecated and was unnecessary here).
        A = np.array([u, d, l])
        phinew[gr.ilo:gr.ihi+1] = linalg.solve_banded((1, 1), A, R)

        return phinew

    def lap(self):
        """ compute the Laplacian of phi (second-order centered) """
        gr = self.grid
        phi = gr.phi

        lapphi = gr.scratch_array()

        ib = gr.ilo
        ie = gr.ihi

        lapphi[ib:ie+1] = \
            (phi[ib-1:ie] - 2.0*phi[ib:ie+1] + phi[ib+1:ie+2])/gr.dx**2

        return lapphi

    def evolve(self, C, tmax):
        """
        the main evolution loop. Evolve

        phi_t = k phi_{xx}

        from t = 0 to tmax, with timestep dt = C * 0.5 dx^2 / k (C is the
        multiple of the explicit-stable step; C-N allows C > 1).
        """
        gr = self.grid

        # time info
        dt = C*0.5*gr.dx**2/self.k

        while self.t < tmax:
            gr.fill_BCs()

            # make sure we end right at tmax
            if self.t + dt > tmax:
                dt = tmax - self.t

            # diffuse for dt
            phinew = self.diffuse_CN(dt)

            gr.phi[:] = phinew[:]
            self.t += dt
if __name__ == "__main__":

    #--------------------------------------------------------------------------
    # Convergence of a Gaussian
    # a characteristic timescale for diffusion is L^2/k
    tmax = 0.005
    t0 = 1.e-4
    phi1 = 1.0
    phi2 = 2.0
    k = 1.0
    N = np.array([32, 64, 128, 256, 512])
    # CFL number
    CFL = [0.8, 8.0]
    for C in CFL:
        err = []
        for nx in N:
            # the present C-N discretization
            print(C, nx)
            g = Grid1d(nx, ng=1)
            s = Simulation(g, k=k)
            s.init_cond("gaussian", t0, phi1, phi2)
            s.evolve(C, tmax)
            xc = 0.5*(g.xmin + g.xmax)
            phi_analytic = g.phi_a(tmax, k, t0, phi1, phi2)
            err.append(g.norm(g.phi - phi_analytic))
        # error vs. resolution, with a second-order reference line
        plt.clf()
        err = np.array(err)
        plt.scatter(N, err, color="C0", label="C-N implicit diffusion")
        plt.loglog(N, err[len(N)-1]*(N[len(N)-1]/N)**2,
                   color="C1", label="$\mathcal{O}(\Delta x^2)$")
        plt.xlabel(r"$N$", fontsize="large")
        plt.ylabel(r"L2 norm of absolute error")
        plt.title("C-N Implicit Diffusion, C = %3.2f, t = %5.2g" % (C, tmax))
        plt.ylim(1.e-6, 1.e-2)
        plt.legend(frameon=False, fontsize="small")
        plt.tight_layout()
        plt.savefig("diffimplicit-converge-{}.pdf".format(C))
    #-------------------------------------------------------------------------
    # solution at multiple times
    # diffusion coefficient
    k = 1.0
    # reference time
    t0 = 1.e-4
    # state coeffs
    phi1 = 1.0
    phi2 = 2.0
    nx = 128
    # a characteristic timescale for diffusion is 0.5*dx**2/k
    dt = 0.5/(k*nx**2)
    tmax = 100*dt
    # analytic on a fine grid
    nx_analytic = 512
    CFL = [0.8, 8.0]
    for C in CFL:
        plt.clf()
        # plot the solution at ntimes doubling instants ending at tmax
        ntimes = 5
        tend = tmax/2.0**(ntimes-1)
        c = ["C0", "C1", "C2", "C3", "C4"]
        while tend <= tmax:
            g = Grid1d(nx, ng=2)
            s = Simulation(g, k=k)
            s.init_cond("gaussian", t0, phi1, phi2)
            s.evolve(C, tend)
            ga = Grid1d(nx_analytic, ng=2)
            xc = 0.5*(ga.xmin + ga.xmax)
            phi_analytic = ga.phi_a(tend, k, t0, phi1, phi2)
            color = c.pop()
            plt.plot(g.x[g.ilo:g.ihi+1], g.phi[g.ilo:g.ihi+1],
                     "x", color=color, label="$t = %g$ s" % (tend))
            plt.plot(ga.x[ga.ilo:ga.ihi+1], phi_analytic[ga.ilo:ga.ihi+1],
                     color=color, ls="-")
            tend = 2.0*tend
        plt.xlim(0.35,0.65)
        plt.ylim(0.95,1.7)
        plt.legend(frameon=False, fontsize="small")
        plt.xlabel("$x$", fontsize="large")
        plt.ylabel(r"$\phi$", fontsize="large")
        plt.title(r"implicit diffusion, N = %d, $C$ = %3.2f" % (nx, C))
        f = plt.gcf()
        f.set_size_inches(8.0, 6.0)
        plt.tight_layout()
        plt.savefig("diff-implicit-{}-CFL_{}.pdf".format(nx, C))
    # under-resolved example
    # NOTE(review): this section evolves to ``tend`` (a leftover from the
    # loop above, > tmax) while the title reports ``t = tmax``, and reuses
    # the leftover ``color`` from the loop — confirm whether ``tmax`` was
    # intended in the evolve()/phi_a() calls below.
    plt.clf()
    nx = 64
    C = 10.0
    tmax = 0.001
    g = Grid1d(nx, ng=2)
    s = Simulation(g, k=k)
    s.init_cond("gaussian", t0, phi1, phi2)
    s.evolve(C, tend)
    ga = Grid1d(nx_analytic, ng=2)
    xc = 0.5*(ga.xmin + ga.xmax)
    phi_analytic = ga.phi_a(tend, k, t0, phi1, phi2)
    plt.plot(g.x[g.ilo:g.ihi+1], g.phi[g.ilo:g.ihi+1],
             color="r", marker="x", ls="-", label="$t = %g$ s" % (tend))
    plt.plot(ga.x[ga.ilo:ga.ihi+1], phi_analytic[ga.ilo:ga.ihi+1],
             color=color, ls="-")
    plt.xlim(0.2,0.8)
    plt.xlabel("$x$", fontsize="large")
    plt.ylabel(r"$\phi$", fontsize="large")
    plt.title(r"implicit diffusion, N = {}, $C$ = {:3.2f}, $t$ = {}".format(nx, C, tmax))
    f = plt.gcf()
    f.set_size_inches(8.0, 6.0)
    plt.tight_layout()
    plt.savefig("diff-implicit-{}-CFL_{}.pdf".format(nx, C))
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.matrix",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.ones",
"scipy.linalg.solve_banded",
"numpy.array",
"diffusion_explicit.Grid1d",
"m... | [((3108, 3141), 'numpy.array', 'np.array', (['[32, 64, 128, 256, 512]'], {}), '([32, 64, 128, 256, 512])\n', (3116, 3141), True, 'import numpy as np\n'), ((5901, 5910), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5908, 5910), True, 'import matplotlib.pyplot as plt\n'), ((5962, 5978), 'diffusion_explicit.Grid1d', 'Grid1d', (['nx'], {'ng': '(2)'}), '(nx, ng=2)\n', (5968, 5978), False, 'from diffusion_explicit import Grid1d\n'), ((6082, 6107), 'diffusion_explicit.Grid1d', 'Grid1d', (['nx_analytic'], {'ng': '(2)'}), '(nx_analytic, ng=2)\n', (6088, 6107), False, 'from diffusion_explicit import Grid1d\n'), ((6199, 6316), 'matplotlib.pyplot.plot', 'plt.plot', (['g.x[g.ilo:g.ihi + 1]', 'g.phi[g.ilo:g.ihi + 1]'], {'color': '"""r"""', 'marker': '"""x"""', 'ls': '"""-"""', 'label': "('$t = %g$ s' % tend)"}), "(g.x[g.ilo:g.ihi + 1], g.phi[g.ilo:g.ihi + 1], color='r', marker=\n 'x', ls='-', label='$t = %g$ s' % tend)\n", (6207, 6316), True, 'import matplotlib.pyplot as plt\n'), ((6327, 6419), 'matplotlib.pyplot.plot', 'plt.plot', (['ga.x[ga.ilo:ga.ihi + 1]', 'phi_analytic[ga.ilo:ga.ihi + 1]'], {'color': 'color', 'ls': '"""-"""'}), "(ga.x[ga.ilo:ga.ihi + 1], phi_analytic[ga.ilo:ga.ihi + 1], color=\n color, ls='-')\n", (6335, 6419), True, 'import matplotlib.pyplot as plt\n'), ((6429, 6447), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.2)', '(0.8)'], {}), '(0.2, 0.8)\n', (6437, 6447), True, 'import matplotlib.pyplot as plt\n'), ((6452, 6487), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {'fontsize': '"""large"""'}), "('$x$', fontsize='large')\n", (6462, 6487), True, 'import matplotlib.pyplot as plt\n'), ((6492, 6531), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\phi$"""'], {'fontsize': '"""large"""'}), "('$\\\\phi$', fontsize='large')\n", (6502, 6531), True, 'import matplotlib.pyplot as plt\n'), ((6631, 6640), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (6638, 6640), True, 'import matplotlib.pyplot as plt\n'), ((6678, 6696), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6694, 6696), True, 'import matplotlib.pyplot as plt\n'), ((1866, 1886), 'numpy.matrix', 'np.matrix', (['[u, d, l]'], {}), '([u, d, l])\n', (1875, 1886), True, 'import numpy as np\n'), ((1919, 1952), 'scipy.linalg.solve_banded', 'linalg.solve_banded', (['(1, 1)', 'A', 'R'], {}), '((1, 1), A, R)\n', (1938, 1952), False, 'from scipy import linalg\n'), ((3643, 3652), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3650, 3652), True, 'import matplotlib.pyplot as plt\n'), ((3668, 3681), 'numpy.array', 'np.array', (['err'], {}), '(err)\n', (3676, 3681), True, 'import numpy as np\n'), ((3691, 3754), 'matplotlib.pyplot.scatter', 'plt.scatter', (['N', 'err'], {'color': '"""C0"""', 'label': '"""C-N implicit diffusion"""'}), "(N, err, color='C0', label='C-N implicit diffusion')\n", (3702, 3754), True, 'import matplotlib.pyplot as plt\n'), ((3887, 3922), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$N$"""'], {'fontsize': '"""large"""'}), "('$N$', fontsize='large')\n", (3897, 3922), True, 'import matplotlib.pyplot as plt\n'), ((3932, 3971), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""L2 norm of absolute error"""'], {}), "('L2 norm of absolute error')\n", (3942, 3971), True, 'import matplotlib.pyplot as plt\n'), ((3981, 4050), 'matplotlib.pyplot.title', 'plt.title', (["('C-N Implicit Diffusion, C = %3.2f, t = %5.2g' % (C, tmax))"], {}), "('C-N Implicit Diffusion, C = %3.2f, t = %5.2g' % (C, tmax))\n", (3990, 4050), True, 'import matplotlib.pyplot as plt\n'), ((4060, 4081), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1e-06)', '(0.01)'], {}), '(1e-06, 0.01)\n', (4068, 4081), True, 'import matplotlib.pyplot as plt\n'), ((4091, 4134), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)', 'fontsize': '"""small"""'}), "(frameon=False, fontsize='small')\n", (4101, 4134), True, 'import matplotlib.pyplot as plt\n'), ((4144, 4162), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], 
{}), '()\n', (4160, 4162), True, 'import matplotlib.pyplot as plt\n'), ((4697, 4706), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4704, 4706), True, 'import matplotlib.pyplot as plt\n'), ((5449, 5469), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0.35)', '(0.65)'], {}), '(0.35, 0.65)\n', (5457, 5469), True, 'import matplotlib.pyplot as plt\n'), ((5477, 5496), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.95)', '(1.7)'], {}), '(0.95, 1.7)\n', (5485, 5496), True, 'import matplotlib.pyplot as plt\n'), ((5505, 5548), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)', 'fontsize': '"""small"""'}), "(frameon=False, fontsize='small')\n", (5515, 5548), True, 'import matplotlib.pyplot as plt\n'), ((5558, 5593), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {'fontsize': '"""large"""'}), "('$x$', fontsize='large')\n", (5568, 5593), True, 'import matplotlib.pyplot as plt\n'), ((5602, 5641), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\phi$"""'], {'fontsize': '"""large"""'}), "('$\\\\phi$', fontsize='large')\n", (5612, 5641), True, 'import matplotlib.pyplot as plt\n'), ((5650, 5712), 'matplotlib.pyplot.title', 'plt.title', (["('implicit diffusion, N = %d, $C$ = %3.2f' % (nx, C))"], {}), "('implicit diffusion, N = %d, $C$ = %3.2f' % (nx, C))\n", (5659, 5712), True, 'import matplotlib.pyplot as plt\n'), ((5727, 5736), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5734, 5736), True, 'import matplotlib.pyplot as plt\n'), ((5782, 5800), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5798, 5800), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1391), 'numpy.ones', 'np.ones', (['gr.nx'], {}), '(gr.nx)\n', (1384, 1391), True, 'import numpy as np\n'), ((1415, 1429), 'numpy.ones', 'np.ones', (['gr.nx'], {}), '(gr.nx)\n', (1422, 1429), True, 'import numpy as np\n'), ((1473, 1487), 'numpy.ones', 'np.ones', (['gr.nx'], {}), '(gr.nx)\n', (1480, 1487), True, 'import numpy as np\n'), ((3326, 3342), 
'diffusion_explicit.Grid1d', 'Grid1d', (['nx'], {'ng': '(1)'}), '(nx, ng=1)\n', (3332, 3342), False, 'from diffusion_explicit import Grid1d\n'), ((4853, 4869), 'diffusion_explicit.Grid1d', 'Grid1d', (['nx'], {'ng': '(2)'}), '(nx, ng=2)\n', (4859, 4869), False, 'from diffusion_explicit import Grid1d\n'), ((5005, 5030), 'diffusion_explicit.Grid1d', 'Grid1d', (['nx_analytic'], {'ng': '(2)'}), '(nx_analytic, ng=2)\n', (5011, 5030), False, 'from diffusion_explicit import Grid1d\n'), ((5174, 5277), 'matplotlib.pyplot.plot', 'plt.plot', (['g.x[g.ilo:g.ihi + 1]', 'g.phi[g.ilo:g.ihi + 1]', '"""x"""'], {'color': 'color', 'label': "('$t = %g$ s' % tend)"}), "(g.x[g.ilo:g.ihi + 1], g.phi[g.ilo:g.ihi + 1], 'x', color=color,\n label='$t = %g$ s' % tend)\n", (5182, 5277), True, 'import matplotlib.pyplot as plt\n'), ((5305, 5397), 'matplotlib.pyplot.plot', 'plt.plot', (['ga.x[ga.ilo:ga.ihi + 1]', 'phi_analytic[ga.ilo:ga.ihi + 1]'], {'color': 'color', 'ls': '"""-"""'}), "(ga.x[ga.ilo:ga.ihi + 1], phi_analytic[ga.ilo:ga.ihi + 1], color=\n color, ls='-')\n", (5313, 5397), True, 'import matplotlib.pyplot as plt\n')] |
############################################################################################
#
# Project: Peter Moss Acute Myeloid & Lymphoblastic Leukemia AI Research Project
# Repository: ALL Detection System 2020
# Project: AllDS2020 CNN
#
# Author: <NAME> (<EMAIL>)
# Contributors:
# Title: Data helper class
# Description: Data functions for the Acute Lymphoblastic Leukemia Tensorflow CNN 2020.
# License: MIT License
# Last Modified: 2020-07-23
#
############################################################################################
import cv2
import pathlib
import random
import os
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from numpy.random import seed
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils import shuffle
from scipy import ndimage
from skimage import transform as tm
from Classes.Helpers import Helpers
from Classes.Augmentation import Augmentation
class Data():
""" Data class
Data functions for the Acute Lymphoblastic Leukemia Tensorflow CNN 2020.
"""
def __init__(self):
""" Initializes the class. """
self.Helpers = Helpers("Data", False)
self.seed = self.Helpers.confs["cnn"]["data"]["seed"]
self.dim = self.Helpers.confs["cnn"]["data"]["dim"]
seed(self.seed)
random.seed(self.seed)
self.data = []
self.labels = []
self.Helpers.logger.info("Data class initialization complete.")
def do_im_process(self):
""" Sorts the training data and labels for your model. """
aug = Augmentation()
data_dir = pathlib.Path(
self.Helpers.confs["cnn"]["data"]["train_dir"])
data = list(data_dir.glob(
'*' + self.Helpers.confs["cnn"]["data"]["file_type"]))
count = 0
neg_count = 0
pos_count = 0
augmented_data = []
augmented_labels = []
for rimage in data:
fpath = str(rimage)
fname = os.path.basename(rimage)
label = 0 if "_0" in fname else 1
image = self.resize(fpath, self.dim)
if image.shape[2] == 1:
image = np.dstack(
[image, image, image])
augmented_data.append(image.astype(np.float32)/255.)
augmented_labels.append(label)
augmented_data.append(aug.grayscale(image))
augmented_labels.append(label)
augmented_data.append(aug.equalize_hist(image))
augmented_labels.append(label)
horizontal, vertical = aug.reflection(image)
augmented_data.append(horizontal)
augmented_labels.append(label)
augmented_data.append(vertical)
augmented_labels.append(label)
augmented_data.append(aug.gaussian(image))
augmented_labels.append(label)
augmented_data.append(aug.translate(image))
augmented_labels.append(label)
augmented_data.append(aug.shear(image))
augmented_labels.append(label)
self.data, self.labels = aug.rotation(image, label, augmented_data, augmented_labels)
if "_0" in fname:
neg_count += 9
else:
pos_count += 9
count += 9
self.shuffle()
self.convert_data()
self.encode_labels()
self.Helpers.logger.info("Raw data: " + str(count))
self.Helpers.logger.info("Raw negative data: " + str(neg_count))
self.Helpers.logger.info("Raw positive data: " + str(count))
self.Helpers.logger.info("Augmented data: " + str(self.data.shape))
self.Helpers.logger.info("Labels: " + str(self.labels.shape))
self.get_split()
def convert_data(self):
""" Converts the training data to a numpy array. """
self.data = np.array(self.data)
self.Helpers.logger.info("Data shape: " + str(self.data.shape))
def encode_labels(self):
""" One Hot Encodes the labels. """
encoder = OneHotEncoder(categories='auto')
self.labels = np.reshape(self.labels, (-1, 1))
self.labels = encoder.fit_transform(self.labels).toarray()
self.Helpers.logger.info("Labels shape: " + str(self.labels.shape))
def shuffle(self):
""" Shuffles the data and labels. """
self.data, self.labels = shuffle(self.data, self.labels, random_state=self.seed)
def get_split(self):
""" Splits the data and labels creating training and validation datasets. """
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.data, self.labels, test_size=0.255, random_state=self.seed)
self.Helpers.logger.info("Training data: " + str(self.X_train.shape))
self.Helpers.logger.info("Training labels: " + str(self.y_train.shape))
self.Helpers.logger.info("Validation data: " + str(self.X_test.shape))
self.Helpers.logger.info("Validation labels: " + str(self.y_test.shape))
    def resize(self, path, dim):
        """ Resizes an image to the provided dimensions (dim). """
        # Loads the image from disk with OpenCV and resizes it to dim x dim.
        return cv2.resize(cv2.imread(path), (dim, dim)) | [
"numpy.dstack",
"numpy.random.seed",
"os.path.basename",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"cv2.imread",
"pathlib.Path",
"random.seed",
"numpy.array",
"numpy.reshape",
"Classes.Helpers.Helpers",
"sklearn.utils.shuffle",
"Classes.Augmentation.A... | [((1238, 1260), 'Classes.Helpers.Helpers', 'Helpers', (['"""Data"""', '(False)'], {}), "('Data', False)\n", (1245, 1260), False, 'from Classes.Helpers import Helpers\n'), ((1413, 1428), 'numpy.random.seed', 'seed', (['self.seed'], {}), '(self.seed)\n', (1417, 1428), False, 'from numpy.random import seed\n'), ((1437, 1459), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (1448, 1459), False, 'import random\n'), ((1714, 1728), 'Classes.Augmentation.Augmentation', 'Augmentation', ([], {}), '()\n', (1726, 1728), False, 'from Classes.Augmentation import Augmentation\n'), ((1749, 1809), 'pathlib.Path', 'pathlib.Path', (["self.Helpers.confs['cnn']['data']['train_dir']"], {}), "(self.Helpers.confs['cnn']['data']['train_dir'])\n", (1761, 1809), False, 'import pathlib\n'), ((4044, 4063), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (4052, 4063), True, 'import numpy as np\n'), ((4229, 4261), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (4242, 4261), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((4285, 4317), 'numpy.reshape', 'np.reshape', (['self.labels', '(-1, 1)'], {}), '(self.labels, (-1, 1))\n', (4295, 4317), True, 'import numpy as np\n'), ((4565, 4620), 'sklearn.utils.shuffle', 'shuffle', (['self.data', 'self.labels'], {'random_state': 'self.seed'}), '(self.data, self.labels, random_state=self.seed)\n', (4572, 4620), False, 'from sklearn.utils import shuffle\n'), ((4797, 4883), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.data', 'self.labels'], {'test_size': '(0.255)', 'random_state': 'self.seed'}), '(self.data, self.labels, test_size=0.255, random_state=self\n .seed)\n', (4813, 4883), False, 'from sklearn.model_selection import train_test_split\n'), ((2136, 2160), 'os.path.basename', 'os.path.basename', (['rimage'], {}), '(rimage)\n', (2152, 2160), False, 'import os\n'), ((5339, 5355), 
'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (5349, 5355), False, 'import cv2\n'), ((2318, 2350), 'numpy.dstack', 'np.dstack', (['[image, image, image]'], {}), '([image, image, image])\n', (2327, 2350), True, 'import numpy as np\n')] |
import networkx as nx
import random
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import math
import time
import sys
G = nx.Graph()  # contact network
ListI = []  # currently infected nodes
ListSI = []  # S-I edge list: pairs [infected, susceptible] eligible for transmission
lam = 0.6  # infection rate (multiplies the number of S-I edges in c)
mu = 1.0  # recovery rate (multiplies the number of infected nodes in c)
# NOTE: `global` at module level is a no-op; tGlobal is simply a module global.
global tGlobal
tGlobal = 0  # simulation clock, advanced by a fixed step per event
c = 0  # total event rate: mu*len(ListI) + lam*len(ListSI)
NUMBEROFNODE = int(1e4)
MAXNEIGHBORCOUNT = 100
def Init_Data(G,ListI):
    """Add NUMBEROFNODE nodes to G; each is initially infected with
    probability 0.05 (Bernoulli draw of 0) and recorded in ListI."""
    infected_flags = np.random.binomial(1, 0.95, NUMBEROFNODE)
    for node in range(NUMBEROFNODE):
        G.add_node(node)
        if infected_flags[node] == 0:
            ListI.append(node)
def Init_ListSI(G,ListI,ListSI):
    """Build the contact network's edges and the initial S-I edge list.

    Target degrees follow a truncated power law p(k) ~ k^-2 for
    k in [3, MAXNEIGHBORCOUNT]; each node is then wired to randomly
    chosen higher-numbered nodes until its target degree is met (best
    effort).  Every edge from an infected node to a susceptible node is
    recorded in ListSI, and the global total event rate
    c = mu*|I| + lam*|SI| is initialised.
    """
    # Normalisation constant for the k^-2 degree distribution.
    sumOfProb = 0
    for neighborNumber in range(3,MAXNEIGHBORCOUNT+1):
        sumOfProb += math.pow(neighborNumber,-2)
    # distribution[k] = number of nodes that should receive degree k.
    distribution = [None]*(MAXNEIGHBORCOUNT+1)
    distribution[0] = 0
    distribution[1] = 0
    distribution[2] = 0
    for neighborNumber in range(3,MAXNEIGHBORCOUNT+1):
        distribution[neighborNumber] = int(math.pow(neighborNumber,-2)/sumOfProb * NUMBEROFNODE)
    # Integer truncation leaves some nodes unassigned; give them all degree 3.
    RESTNUMBER = NUMBEROFNODE - sum(distribution)
    distribution[3] = distribution[3]+RESTNUMBER
    # Hand out target degrees to nodes in increasing-degree order.
    nodeid = 0
    for i in range(len(distribution)):
        if distribution[i] == 0:
            continue
        else:
            while(distribution[i] != 0):
                G.nodes[nodeid]['neighborNumber'] = i
                distribution[i] = distribution[i] - 1
                nodeid = nodeid + 1
    # Wire each node to random higher-numbered partners until its target
    # degree is reached; partners that are already full are skipped, so
    # final degrees may fall short of the target.
    for node in G.__iter__():
        if len(list(G.neighbors(node))) >= G.nodes[node]['neighborNumber']:
            continue
        else:
            neighborNumberToChoose = G.nodes[node]['neighborNumber'] - len(list(G.neighbors(node)))
            if node == NUMBEROFNODE-1:
                break
            for i in range(neighborNumberToChoose):
                neighborNew = random.choice(range(node+1,NUMBEROFNODE))
                if (G.nodes[neighborNew]['neighborNumber'] <= len(list(G.neighbors(neighborNew)))):
                    continue
                # neighborNew = random.choice(range(node+1,NUMBEROFNODE))
                G.add_edge(node,neighborNew)
                # Record susceptible neighbours of infected nodes as S-I edges.
                if node in ListI and neighborNew not in ListI:
                    ListSI.append([node,neighborNew])
    # Total event rate: recoveries (mu per infected) + infections (lam per S-I edge).
    global c
    c = mu*len(ListI)+lam*len(ListSI)
    # print(c)
def chooseEvent(lam,mu,ListI,ListSI):
    """Pick and execute the next event: a recovery with probability
    mu*|I|/c, otherwise an infection along a random S-I edge.

    (The original's ``while c != 0`` loop broke unconditionally on both
    branches, so it is equivalent to this single ``if``.)
    """
    global c
    if c != 0:
        recoverFirst = np.random.binomial(1, mu * len(ListI) / c, 1)
        if recoverFirst == 1:
            GET_S_EVENT(G, ListI, ListSI)
        else:
            GET_I_EVENT(G, ListI, ListSI)
def GET_S_EVENT(G,ListI,ListSI):
    """Recovery event: a random infected node becomes susceptible again.

    Removes the node from ListI, drops every S-I edge that originates at
    it, recomputes the global total event rate c, and advances the global
    clock by a fixed step of 0.0025.

    Bug fix: the original removed matching edges from ListSI *while
    iterating over it*, which skips the element following each removal
    and leaves stale S-I edges behind.  The list is now rebuilt instead.
    """
    node = random.choice(ListI)
    ListI.remove(node)
    # Slice assignment mutates the list in place, so callers holding a
    # reference to ListSI observe the update.
    ListSI[:] = [edge for edge in ListSI if edge[0] != node]
    global c
    c = mu*len(ListI)+lam*len(ListSI)
    global tGlobal
    tGlobal = tGlobal + 0.0025
def GET_I_EVENT(G,ListI,ListSI):
    """Infection event: a random S-I edge transmits and edge[1] becomes
    infected.  Updates the global event rate c and advances the global
    clock by a fixed step of 0.0025.
    """
    edge = random.choice(ListSI)
    ListSI.remove(edge)
    ListI.append(edge[1])
    # NOTE(review): every neighbour of the newly infected node is appended
    # as an S-I edge, including neighbours that are already infected, and
    # other S-I edges that pointed at edge[1] are not removed -- this
    # looks like it over-counts infection channels; confirm against the
    # intended Gillespie bookkeeping.
    for node in list(G.neighbors(edge[1])):
        ListSI.append([edge[1],node])
    global c
    c = mu*len(ListI)+lam*len(ListSI)
    global tGlobal
    tGlobal = tGlobal + 0.0025
def main(argv=None):
    """Run the SIS simulation until no events remain, the epidemic dies
    out, or the clock passes 10.0, then plot the infected and susceptible
    fractions over time.

    Bug fix: the original assigned a *local* ``c = 0``, which shadowed
    the module-level event rate maintained by Init_ListSI and the event
    handlers -- the simulation loop therefore exited immediately.
    Declaring ``global c`` makes the loop observe the real event rate.
    """
    global c
    if argv is None:
        argv = sys.argv
    IPercent = []
    SPercent = []
    Tlist = []
    start = time.perf_counter()
    Init_Data(G, ListI)
    Init_ListSI(G, ListI, ListSI)  # sets the global event rate c
    while True:
        if c == 0:
            break
        if len(ListI) == 0:
            break
        if tGlobal > 10.0:
            break
        chooseEvent(lam, mu, ListI, ListSI)
        IPercent.append(len(ListI) / NUMBEROFNODE)
        SPercent.append(1 - len(ListI) / NUMBEROFNODE)
        Tlist.append(tGlobal)
    end = time.perf_counter()
    print('Running time: %s Seconds' % (end - start))
    plt.plot(Tlist, IPercent)
    plt.plot(Tlist, SPercent)
    plt.show()
    return 0
# Script entry point: run the simulation and exit with its status code.
if __name__ == '__main__':
    sys.exit(main())
| [
"numpy.random.binomial",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"math.pow",
"time.perf_counter",
"random.choice",
"networkx.Graph"
] | [((159, 169), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (167, 169), True, 'import networkx as nx\n'), ((332, 373), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.95)', 'NUMBEROFNODE'], {}), '(1, 0.95, NUMBEROFNODE)\n', (350, 373), True, 'import numpy as np\n'), ((2534, 2554), 'random.choice', 'random.choice', (['ListI'], {}), '(ListI)\n', (2547, 2554), False, 'import random\n'), ((2813, 2834), 'random.choice', 'random.choice', (['ListSI'], {}), '(ListSI)\n', (2826, 2834), False, 'import random\n'), ((3201, 3220), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3218, 3220), False, 'import time\n'), ((3611, 3630), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3628, 3630), False, 'import time\n'), ((3705, 3730), 'matplotlib.pyplot.plot', 'plt.plot', (['Tlist', 'IPercent'], {}), '(Tlist, IPercent)\n', (3713, 3730), True, 'import matplotlib.pyplot as plt\n'), ((3734, 3759), 'matplotlib.pyplot.plot', 'plt.plot', (['Tlist', 'SPercent'], {}), '(Tlist, SPercent)\n', (3742, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3764, 3774), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3772, 3774), True, 'import matplotlib.pyplot as plt\n'), ((634, 662), 'math.pow', 'math.pow', (['neighborNumber', '(-2)'], {}), '(neighborNumber, -2)\n', (642, 662), False, 'import math\n'), ((880, 908), 'math.pow', 'math.pow', (['neighborNumber', '(-2)'], {}), '(neighborNumber, -2)\n', (888, 908), False, 'import math\n')] |
""" Models that use various Approximate Nearest Neighbours libraries in order to quickly
generate recommendations and lists of similar items.
See http://www.benfrederickson.com/approximate-nearest-neighbours-for-recommender-systems/
"""
import itertools
import logging
import numpy
from implicit.als import AlternatingLeastSquares
def augment_inner_product_matrix(factors):
    """ Transform a factor matrix so that an angular (cosine) nearest
    neighbours search over the result ranks items by inner product.

    Implements the Euclidean transformation from the paper "Speeding Up
    the Xbox Recommender System Using a Euclidean Transformation for
    Inner-Product Spaces"
    https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/XboxInnerProduct.pdf

    Each row gains one extra coordinate chosen so that every row ends up
    with the same norm (the maximum row norm of the input); the cosine of
    a transformed row against a query padded with a 0 is then
    proportional to the original dot product.

    Returns (max_norm, augmented_factors).
    """
    row_norms = numpy.linalg.norm(factors, axis=1)
    largest = row_norms.max()

    # Padding coordinate that lifts every row's norm up to `largest`.
    padding = numpy.sqrt(largest ** 2 - row_norms ** 2).reshape(-1, 1)
    return largest, numpy.append(factors, padding, axis=1)
class NMSLibAlternatingLeastSquares(AlternatingLeastSquares):
    """ Speeds up the base :class:`~implicit.als.AlternatingLeastSquares` model by using
    `NMSLib <https://github.com/searchivarius/nmslib>`_ to create approximate nearest neighbours
    indices of the latent factors.
    Parameters
    ----------
    method : str, optional
        The NMSLib method to use
    index_params: dict, optional
        Optional params to send to the createIndex call in NMSLib
    query_params: dict, optional
        Optional query time params for the NMSLib 'setQueryTimeParams' call
    approximate_similar_items : bool, optional
        whether or not to build an NMSLIB index for computing similar_items
    approximate_recommend : bool, optional
        whether or not to build an NMSLIB index for the recommend call
    Attributes
    ----------
    similar_items_index : nmslib.FloatIndex
        NMSLib index for looking up similar items in the cosine space formed by the latent
        item_factors
    recommend_index : nmslib.FloatIndex
        NMSLib index for looking up similar items in the inner product space formed by the latent
        item_factors
    """
    def __init__(self,
                 approximate_similar_items=True, approximate_recommend=True,
                 method='hnsw', index_params=None, query_params=None, *args, **kwargs):
        # Default HNSW construction/query parameters when none are supplied.
        if index_params is None:
            index_params = {'M': 32, 'post': 2, 'efConstruction': 800}
        if query_params is None:
            query_params = {'ef': 50}
        # Indices are built lazily in fit().
        self.similar_items_index = None
        self.recommend_index = None
        self.approximate_similar_items = approximate_similar_items
        self.approximate_recommend = approximate_recommend
        self.method = method
        self.index_params = index_params
        self.query_params = query_params
        super(NMSLibAlternatingLeastSquares, self).__init__(*args, **kwargs)
    def fit(self, Ciu):
        """ Fits the base ALS model on Ciu, then builds the NMSLib indices
        for similar_items (cosine) and recommend (inner product). """
        # nmslib can be a little chatty when first imported, disable some of
        # the logging
        logging.getLogger('nmslib').setLevel(logging.WARNING)
        import nmslib
        # train the model
        super(NMSLibAlternatingLeastSquares, self).fit(Ciu)
        # create index for similar_items
        if self.approximate_similar_items:
            self.similar_items_index = nmslib.init(
                method=self.method, space='cosinesimil')
            # there are some numerical instability issues here with
            # building a cosine index with vectors with 0 norms, hack around this
            # by just not indexing them
            norms = numpy.linalg.norm(self.item_factors, axis=1)
            ids = numpy.arange(self.item_factors.shape[0])
            # delete zero valued rows from the matrix
            item_factors = numpy.delete(self.item_factors, ids[norms == 0], axis=0)
            ids = ids[norms != 0]
            self.similar_items_index.addDataPointBatch(item_factors, ids=ids)
            self.similar_items_index.createIndex(self.index_params)
            self.similar_items_index.setQueryTimeParams(self.query_params)
        # build up a separate index for the inner product (for recommend
        # methods)
        if self.approximate_recommend:
            self.max_norm, extra = augment_inner_product_matrix(
                self.item_factors)
            self.recommend_index = nmslib.init(
                method='hnsw', space='cosinesimil')
            self.recommend_index.addDataPointBatch(extra)
            self.recommend_index.createIndex(self.index_params)
            self.recommend_index.setQueryTimeParams(self.query_params)
    def similar_items(self, itemid, N=10):
        """ Returns an iterable of (itemid, score) pairs for the N items
        most similar to itemid, using the approximate cosine index when
        enabled (falling back to the exact base-class computation). """
        if not self.approximate_similar_items:
            return super(NMSLibAlternatingLeastSquares, self).similar_items(itemid, N)
        neighbours, distances = self.similar_items_index.knnQuery(
            self.item_factors[itemid], N)
        # nmslib returns cosine *distance*; convert to a similarity score.
        return zip(neighbours, 1.0 - distances)
    def recommend(self, userid, user_items, N=10, filter_items=None, recalculate_user=False):
        """ Returns a list of (itemid, score) recommendations for userid,
        excluding already-liked items and any filter_items, using the
        approximate inner-product index when enabled. """
        if not self.approximate_recommend:
            return super(NMSLibAlternatingLeastSquares,
                         self).recommend(userid, user_items, N=N,
                                         filter_items=filter_items,
                                         recalculate_user=recalculate_user)
        user = self._user_factor(userid, user_items, recalculate_user)
        # calculate the top N items, removing the users own liked items from
        # the results
        liked = set(user_items[userid].indices)
        if filter_items:
            liked.update(filter_items)
        count = N + len(liked)
        # pad the query with a 0 to match the augmented index dimension
        query = numpy.append(user, 0)
        ids, dist = self.recommend_index.knnQuery(query, count)
        # convert the distances from euclidean to cosine distance,
        # and then rescale the cosine distance to go back to inner product
        scaling = self.max_norm * numpy.linalg.norm(query)
        dist = scaling * (1.0 - dist)
        return list(itertools.islice((rec for rec in zip(ids, dist) if rec[0] not in liked), N))
class AnnoyAlternatingLeastSquares(AlternatingLeastSquares):
    """A version of the :class:`~implicit.als.AlternatingLeastSquares` model that uses an
    `Annoy <https://github.com/spotify/annoy>`_ index to calculate similar items and
    recommend items.
    Parameters
    ----------
    n_trees : int, optional
        The number of trees to use when building the Annoy index. More trees gives higher precision
        when querying.
    search_k : int, optional
        Provides a way to search more trees at runtime, giving the ability to have more accurate
        results at the cost of taking more time.
    approximate_similar_items : bool, optional
        whether or not to build an Annoy index for computing similar_items
    approximate_recommend : bool, optional
        whether or not to build an Annoy index for the recommend call
    Attributes
    ----------
    similar_items_index : annoy.AnnoyIndex
        Annoy index for looking up similar items in the cosine space formed by the latent
        item_factors
    recommend_index : annoy.AnnoyIndex
        Annoy index for looking up similar items in the inner product space formed by the latent
        item_factors
    """
    def __init__(self, approximate_similar_items=True, approximate_recommend=True,
                 n_trees=50, search_k=-1, *args, **kwargs):
        super(AnnoyAlternatingLeastSquares, self).__init__(*args, **kwargs)
        # Indices are built lazily in fit().
        self.similar_items_index = None
        self.recommend_index = None
        self.approximate_similar_items = approximate_similar_items
        self.approximate_recommend = approximate_recommend
        self.n_trees = n_trees
        self.search_k = search_k
    def fit(self, Ciu):
        """ Fits the base ALS model on Ciu, then builds the two Annoy
        indices (angular over raw factors for similar_items, angular over
        inner-product-augmented factors for recommend). """
        # delay loading the annoy library in case its not installed here
        import annoy
        # train the model
        super(AnnoyAlternatingLeastSquares, self).fit(Ciu)
        # build up an Annoy Index with all the item_factors (for calculating
        # similar items)
        self.similar_items_index = annoy.AnnoyIndex(
            self.item_factors.shape[1], 'angular')
        for i, row in enumerate(self.item_factors):
            self.similar_items_index.add_item(i, row)
        self.similar_items_index.build(self.n_trees)
        # build up a separate index for the inner product (for recommend
        # methods)
        self.max_norm, extra = augment_inner_product_matrix(self.item_factors)
        self.recommend_index = annoy.AnnoyIndex(extra.shape[1], 'angular')
        for i, row in enumerate(extra):
            self.recommend_index.add_item(i, row)
        self.recommend_index.build(self.n_trees)
    def similar_items(self, itemid, N=10):
        """ Returns an iterable of (itemid, score) pairs for the N items
        most similar to itemid, using the approximate angular index when
        enabled (falling back to the exact base-class computation). """
        if not self.approximate_similar_items:
            return super(AnnoyAlternatingLeastSquares, self).similar_items(itemid, N)
        neighbours, dist = self.similar_items_index.get_nns_by_item(itemid, N,
                                                                    search_k=self.search_k,
                                                                    include_distances=True)
        # transform distances back to cosine from euclidean distance
        return zip(neighbours, 1 - (numpy.array(dist) ** 2) / 2)
    def recommend(self, userid, user_items, N=10, filter_items=None, recalculate_user=False):
        """ Returns a list of (itemid, score) recommendations for userid,
        excluding already-liked items and any filter_items, using the
        approximate inner-product index when enabled. """
        if not self.approximate_recommend:
            return super(AnnoyAlternatingLeastSquares,
                         self).recommend(userid, user_items, N=N,
                                         filter_items=filter_items,
                                         recalculate_user=recalculate_user)
        user = self._user_factor(userid, user_items, recalculate_user)
        # calculate the top N items, removing the users own liked items from
        # the results
        liked = set(user_items[userid].indices)
        if filter_items:
            liked.update(filter_items)
        count = N + len(liked)
        # pad the query with a 0 to match the augmented index dimension
        query = numpy.append(user, 0)
        ids, dist = self.recommend_index.get_nns_by_vector(query, count, include_distances=True,
                                                           search_k=self.search_k)
        # convert the distances from euclidean to cosine distance,
        # and then rescale the cosine distance to go back to inner product
        scaling = self.max_norm * numpy.linalg.norm(query)
        dist = scaling * (1 - (numpy.array(dist) ** 2) / 2)
        return list(itertools.islice((rec for rec in zip(ids, dist) if rec[0] not in liked), N))
class FaissAlternatingLeastSquares(AlternatingLeastSquares):
    """ Speeds up the base :class:`~implicit.als.AlternatingLeastSquares` model by using
    `Faiss <https://github.com/facebookresearch/faiss>`_ to create approximate nearest neighbours
    indices of the latent factors.
    Parameters
    ----------
    nlist : int, optional
        The number of cells to use when building the Faiss index.
    nprobe : int, optional
        The number of cells to visit to perform a search.
    gpu : bool, optional
        Whether or not to enable run Faiss on the GPU. Requires faiss to have been
        built with GPU support.
    approximate_similar_items : bool, optional
        whether or not to build an Faiss index for computing similar_items
    approximate_recommend : bool, optional
        whether or not to build an Faiss index for the recommend call
    Attributes
    ----------
    similar_items_index : faiss.IndexIVFFlat
        Faiss index for looking up similar items in the cosine space formed by the latent
        item_factors
    recommend_index : faiss.IndexIVFFlat
        Faiss index for looking up similar items in the inner product space formed by the latent
        item_factors
    """

    def __init__(self, approximate_similar_items=True, approximate_recommend=True,
                 nlist=400, nprobe=20, gpu=False, *args, **kwargs):
        # Indices are built lazily in fit().
        self.similar_items_index = None
        self.recommend_index = None
        self.approximate_similar_items = approximate_similar_items
        self.approximate_recommend = approximate_recommend
        self.gpu = gpu
        # hyper-parameters for FAISS
        self.nlist = nlist
        self.nprobe = nprobe
        super(FaissAlternatingLeastSquares, self).__init__(*args, **kwargs)

    def fit(self, Ciu):
        """ Fits the base ALS model on Ciu, then builds the Faiss IVF
        indices: an inner-product index over raw factors for recommend,
        and one over L2-normalized factors (i.e. cosine) for
        similar_items. """
        import faiss

        # train the model
        super(FaissAlternatingLeastSquares, self).fit(Ciu)

        self.quantizer = faiss.IndexFlat(self.factors)

        if self.gpu:
            self.gpu_resources = faiss.StandardGpuResources()

        # faiss indices require float32 input
        item_factors = self.item_factors.astype('float32')

        if self.approximate_recommend:
            # build up a inner product index here
            if self.gpu:
                index = faiss.GpuIndexIVFFlat(self.gpu_resources, self.factors, self.nlist,
                                              faiss.METRIC_INNER_PRODUCT)
            else:
                index = faiss.IndexIVFFlat(self.quantizer, self.factors, self.nlist,
                                           faiss.METRIC_INNER_PRODUCT)
            index.train(item_factors)
            index.add(item_factors)
            index.nprobe = self.nprobe
            self.recommend_index = index

        if self.approximate_similar_items:
            # likewise build up cosine index for similar_items, using an inner product
            # index on normalized vectors
            norms = numpy.linalg.norm(item_factors, axis=1)
            # guard against divide-by-zero for all-zero factor rows
            norms[norms == 0] = 1e-10
            normalized = (item_factors.T / norms).T.astype('float32')
            if self.gpu:
                index = faiss.GpuIndexIVFFlat(self.gpu_resources, self.factors, self.nlist,
                                              faiss.METRIC_INNER_PRODUCT)
            else:
                index = faiss.IndexIVFFlat(self.quantizer, self.factors, self.nlist,
                                           faiss.METRIC_INNER_PRODUCT)
            index.train(normalized)
            index.add(normalized)
            index.nprobe = self.nprobe
            self.similar_items_index = index

    def similar_items(self, itemid, N=10):
        """ Returns an iterable of (itemid, score) pairs for the N items
        most similar to itemid, using the approximate cosine index when
        enabled (falling back to the exact base-class computation).

        Bug fix: the original normalized with ``factors /= norm`` on a
        numpy *view* of ``self.item_factors``, silently mutating the
        model's stored factors on every query.  The normalization is now
        done on a fresh array.
        """
        if not self.approximate_similar_items:
            return super(FaissAlternatingLeastSquares, self).similar_items(itemid, N)

        factors = self.item_factors[itemid]
        factors = factors / numpy.linalg.norm(factors)
        (dist,), (ids,) = self.similar_items_index.search(factors.reshape(1, -1).astype('float32'),
                                                          N)
        return zip(ids, dist)

    def recommend(self, userid, user_items, N=10, filter_items=None, recalculate_user=False):
        """ Returns a list of (itemid, score) recommendations for userid,
        excluding already-liked items and any filter_items, using the
        approximate inner-product index when enabled. """
        if not self.approximate_recommend:
            return super(FaissAlternatingLeastSquares,
                         self).recommend(userid, user_items, N=N,
                                         filter_items=filter_items,
                                         recalculate_user=recalculate_user)

        user = self._user_factor(userid, user_items, recalculate_user)

        # calculate the top N items, removing the users own liked items from
        # the results
        liked = set(user_items[userid].indices)
        if filter_items:
            liked.update(filter_items)
        count = N + len(liked)

        # faiss expects multiple queries - convert query to a matrix
        # and results back to single vectors
        query = user.reshape(1, -1).astype('float32')
        (dist,), (ids,) = self.recommend_index.search(query, count)

        # the index metric is already inner product, so scores can be used
        # directly after filtering out liked items
        return list(itertools.islice((rec for rec in zip(ids, dist) if rec[0] not in liked), N))
| [
"faiss.GpuIndexIVFFlat",
"nmslib.init",
"faiss.IndexIVFFlat",
"faiss.StandardGpuResources",
"numpy.append",
"faiss.IndexFlat",
"numpy.linalg.norm",
"numpy.arange",
"numpy.array",
"annoy.AnnoyIndex",
"numpy.delete",
"logging.getLogger",
"numpy.sqrt"
] | [((1098, 1132), 'numpy.linalg.norm', 'numpy.linalg.norm', (['factors'], {'axis': '(1)'}), '(factors, axis=1)\n', (1115, 1132), False, 'import numpy\n'), ((1270, 1308), 'numpy.sqrt', 'numpy.sqrt', (['(max_norm ** 2 - norms ** 2)'], {}), '(max_norm ** 2 - norms ** 2)\n', (1280, 1308), False, 'import numpy\n'), ((6135, 6156), 'numpy.append', 'numpy.append', (['user', '(0)'], {}), '(user, 0)\n', (6147, 6156), False, 'import numpy\n'), ((8592, 8647), 'annoy.AnnoyIndex', 'annoy.AnnoyIndex', (['self.item_factors.shape[1]', '"""angular"""'], {}), "(self.item_factors.shape[1], 'angular')\n", (8608, 8647), False, 'import annoy\n'), ((9023, 9066), 'annoy.AnnoyIndex', 'annoy.AnnoyIndex', (['extra.shape[1]', '"""angular"""'], {}), "(extra.shape[1], 'angular')\n", (9039, 9066), False, 'import annoy\n'), ((10516, 10537), 'numpy.append', 'numpy.append', (['user', '(0)'], {}), '(user, 0)\n', (10528, 10537), False, 'import numpy\n'), ((13008, 13037), 'faiss.IndexFlat', 'faiss.IndexFlat', (['self.factors'], {}), '(self.factors)\n', (13023, 13037), False, 'import faiss\n'), ((14897, 14923), 'numpy.linalg.norm', 'numpy.linalg.norm', (['factors'], {}), '(factors)\n', (14914, 14923), False, 'import numpy\n'), ((3758, 3810), 'nmslib.init', 'nmslib.init', ([], {'method': 'self.method', 'space': '"""cosinesimil"""'}), "(method=self.method, space='cosinesimil')\n", (3769, 3810), False, 'import nmslib\n'), ((4039, 4083), 'numpy.linalg.norm', 'numpy.linalg.norm', (['self.item_factors'], {'axis': '(1)'}), '(self.item_factors, axis=1)\n', (4056, 4083), False, 'import numpy\n'), ((4102, 4142), 'numpy.arange', 'numpy.arange', (['self.item_factors.shape[0]'], {}), '(self.item_factors.shape[0])\n', (4114, 4142), False, 'import numpy\n'), ((4225, 4281), 'numpy.delete', 'numpy.delete', (['self.item_factors', 'ids[norms == 0]'], {'axis': '(0)'}), '(self.item_factors, ids[norms == 0], axis=0)\n', (4237, 4281), False, 'import numpy\n'), ((4805, 4852), 'nmslib.init', 'nmslib.init', ([], {'method': 
'"""hnsw"""', 'space': '"""cosinesimil"""'}), "(method='hnsw', space='cosinesimil')\n", (4816, 4852), False, 'import nmslib\n'), ((6398, 6422), 'numpy.linalg.norm', 'numpy.linalg.norm', (['query'], {}), '(query)\n', (6415, 6422), False, 'import numpy\n'), ((10895, 10919), 'numpy.linalg.norm', 'numpy.linalg.norm', (['query'], {}), '(query)\n', (10912, 10919), False, 'import numpy\n'), ((13093, 13121), 'faiss.StandardGpuResources', 'faiss.StandardGpuResources', ([], {}), '()\n', (13119, 13121), False, 'import faiss\n'), ((13987, 14026), 'numpy.linalg.norm', 'numpy.linalg.norm', (['item_factors'], {'axis': '(1)'}), '(item_factors, axis=1)\n', (14004, 14026), False, 'import numpy\n'), ((3471, 3498), 'logging.getLogger', 'logging.getLogger', (['"""nmslib"""'], {}), "('nmslib')\n", (3488, 3498), False, 'import logging\n'), ((13322, 13422), 'faiss.GpuIndexIVFFlat', 'faiss.GpuIndexIVFFlat', (['self.gpu_resources', 'self.factors', 'self.nlist', 'faiss.METRIC_INNER_PRODUCT'], {}), '(self.gpu_resources, self.factors, self.nlist, faiss.\n METRIC_INNER_PRODUCT)\n', (13343, 13422), False, 'import faiss\n'), ((13506, 13599), 'faiss.IndexIVFFlat', 'faiss.IndexIVFFlat', (['self.quantizer', 'self.factors', 'self.nlist', 'faiss.METRIC_INNER_PRODUCT'], {}), '(self.quantizer, self.factors, self.nlist, faiss.\n METRIC_INNER_PRODUCT)\n', (13524, 13599), False, 'import faiss\n'), ((14185, 14285), 'faiss.GpuIndexIVFFlat', 'faiss.GpuIndexIVFFlat', (['self.gpu_resources', 'self.factors', 'self.nlist', 'faiss.METRIC_INNER_PRODUCT'], {}), '(self.gpu_resources, self.factors, self.nlist, faiss.\n METRIC_INNER_PRODUCT)\n', (14206, 14285), False, 'import faiss\n'), ((14369, 14462), 'faiss.IndexIVFFlat', 'faiss.IndexIVFFlat', (['self.quantizer', 'self.factors', 'self.nlist', 'faiss.METRIC_INNER_PRODUCT'], {}), '(self.quantizer, self.factors, self.nlist, faiss.\n METRIC_INNER_PRODUCT)\n', (14387, 14462), False, 'import faiss\n'), ((9752, 9769), 'numpy.array', 'numpy.array', (['dist'], {}), 
'(dist)\n', (9763, 9769), False, 'import numpy\n'), ((10951, 10968), 'numpy.array', 'numpy.array', (['dist'], {}), '(dist)\n', (10962, 10968), False, 'import numpy\n')] |
from typing import List, Tuple
import cv2
import imutils
import numpy as np
def adaptively_match_digit_hypotheses(template: np.ndarray, image: np.ndarray, scale_iterations: int = 10,
scale_min: float = 0.5, scale_max: float = 1.0,
match_threshold: float = 0.6) -> List[Tuple[List[int], float]]:
"""
:return: list of digit occurences ((x0, y0, x1, y1), confidence)
"""
image_height, image_width = image.shape
template_height, template_width = template.shape
rectangles = []
for scale in np.linspace(scale_min, scale_max, scale_iterations)[::-1]:
resized = imutils.resize(image, width=int(image_width * scale))
resized_height, resized_width = resized.shape
scale_inv = image_width / float(resized_width)
if resized_width < template_width or resized_height < template_height:
break
result = cv2.matchTemplate(resized, template, cv2.TM_CCOEFF_NORMED)
start_coordinates = np.where(result > match_threshold)
values = result[start_coordinates]
for y, x, confidence in zip(*start_coordinates, values):
rectangle_coordinates = get_rectangle_coordinates(x, y, scale_inv, template_width, template_height)
rectangle = (rectangle_coordinates, confidence)
rectangles.append(rectangle)
return rectangles
def get_rectangle_coordinates(x: int, y: int, scale_inv: int, template_width: int, template_height: int) -> List[int]:
x0 = (int)(x * scale_inv)
y0 = (int)(y * scale_inv)
x1 = (int)((x + template_width) * scale_inv)
y1 = (int)((y + template_height) * scale_inv)
return [x0, y0, x1, y1]
| [
"numpy.where",
"numpy.linspace",
"cv2.matchTemplate"
] | [((596, 647), 'numpy.linspace', 'np.linspace', (['scale_min', 'scale_max', 'scale_iterations'], {}), '(scale_min, scale_max, scale_iterations)\n', (607, 647), True, 'import numpy as np\n'), ((952, 1010), 'cv2.matchTemplate', 'cv2.matchTemplate', (['resized', 'template', 'cv2.TM_CCOEFF_NORMED'], {}), '(resized, template, cv2.TM_CCOEFF_NORMED)\n', (969, 1010), False, 'import cv2\n'), ((1039, 1073), 'numpy.where', 'np.where', (['(result > match_threshold)'], {}), '(result > match_threshold)\n', (1047, 1073), True, 'import numpy as np\n')] |
import numpy as np
from tslearn.metrics import cdist_gak
from tslearn.svm import TimeSeriesSVC, TimeSeriesSVR
__author__ = '<NAME> <EMAIL>[<EMAIL>'
def test_gamma_value_svm():
n, sz, d = 5, 10, 3
rng = np.random.RandomState(0)
time_series = rng.randn(n, sz, d)
labels = rng.randint(low=0, high=2, size=n)
gamma = 10.
for ModelClass in [TimeSeriesSVC, TimeSeriesSVR]:
gak_model = ModelClass(kernel="gak", gamma=gamma)
sklearn_X, _ = gak_model._preprocess_sklearn(time_series,
labels,
fit_time=True)
cdist_mat = cdist_gak(time_series, sigma=np.sqrt(gamma / 2.))
np.testing.assert_allclose(sklearn_X, cdist_mat)
def test_deprecated_still_work():
n, sz, d = 5, 10, 3
rng = np.random.RandomState(0)
X = rng.randn(n, sz, d)
y = rng.randint(low=0, high=2, size=n)
for ModelClass in [TimeSeriesSVC, TimeSeriesSVR]:
clf = ModelClass().fit(X, y)
np.testing.assert_equal(clf.support_vectors_time_series_().shape[1:],
X.shape[1:])
| [
"numpy.testing.assert_allclose",
"numpy.random.RandomState",
"numpy.sqrt"
] | [((214, 238), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (235, 238), True, 'import numpy as np\n'), ((848, 872), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (869, 872), True, 'import numpy as np\n'), ((729, 777), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sklearn_X', 'cdist_mat'], {}), '(sklearn_X, cdist_mat)\n', (755, 777), True, 'import numpy as np\n'), ((699, 719), 'numpy.sqrt', 'np.sqrt', (['(gamma / 2.0)'], {}), '(gamma / 2.0)\n', (706, 719), True, 'import numpy as np\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Iterative Quantum Phase Estimation Algorithm.
See https://arxiv.org/abs/quant-ph/0610214
"""
from typing import Optional, List, Dict, Union, Any
import logging
import numpy as np
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.quantum_info import Pauli
from qiskit.providers import BaseBackend
from qiskit.providers import Backend
from qiskit.aqua import QuantumInstance
from qiskit.aqua.operators import (WeightedPauliOperator, suzuki_expansion_slice_pauli_list,
evolution_instruction)
from qiskit.aqua.operators.legacy import op_converter
from qiskit.aqua.utils import get_subsystem_density_matrix
from qiskit.aqua.algorithms import QuantumAlgorithm
from qiskit.aqua.operators import LegacyBaseOperator, OperatorBase
from qiskit.aqua.components.initial_states import InitialState
from qiskit.aqua.utils.validation import validate_min, validate_in_set
from .minimum_eigen_solver import MinimumEigensolver, MinimumEigensolverResult
from .qpe import QPEResult
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class IQPE(QuantumAlgorithm, MinimumEigensolver):
"""The Iterative Quantum Phase Estimation algorithm.
IQPE, as its name suggests, iteratively computes the phase so as to require fewer qubits.
It has the same set of parameters as :class:`QPE`, except for the number of
ancillary qubits *num_ancillae*, being replaced by *num_iterations* and that
an Inverse Quantum Fourier Transform (IQFT) is not used for IQPE.
**Reference:**
[1]: Dobsicek et al. (2006), Arbitrary accuracy iterative phase estimation algorithm as a two
qubit benchmark, `arxiv/quant-ph/0610214 <https://arxiv.org/abs/quant-ph/0610214>`_
"""
def __init__(self,
operator: Optional[Union[OperatorBase, LegacyBaseOperator]] = None,
state_in: Optional[Union[QuantumCircuit, InitialState]] = None,
num_time_slices: int = 1,
num_iterations: int = 1,
expansion_mode: str = 'suzuki',
expansion_order: int = 2,
shallow_circuit_concat: bool = False,
quantum_instance: Optional[
Union[QuantumInstance, BaseBackend, Backend]] = None) -> None:
"""
Args:
operator: The hamiltonian Operator
state_in: An InitialState component representing an initial quantum state.
num_time_slices: The number of time slices, has a minimum value of 1.
num_iterations: The number of iterations, has a minimum value of 1.
expansion_mode: The expansion mode ('trotter'|'suzuki')
expansion_order: The suzuki expansion order, has a min. value of 1.
shallow_circuit_concat: Set True to use shallow (cheap) mode for circuit concatenation
of evolution slices. By default this is False.
quantum_instance: Quantum Instance or Backend
"""
validate_min('num_time_slices', num_time_slices, 1)
validate_min('num_iterations', num_iterations, 1)
validate_in_set('expansion_mode', expansion_mode, {'trotter', 'suzuki'})
validate_min('expansion_order', expansion_order, 1)
super().__init__(quantum_instance) # type: ignore
self._state_in = state_in
self._num_time_slices = num_time_slices
self._num_iterations = num_iterations
self._expansion_mode = expansion_mode
self._expansion_order = expansion_order
self._shallow_circuit_concat = shallow_circuit_concat
self._state_register = None
self._ancillary_register = None
self._ancilla_phase_coef = None
self._in_operator = operator
self._operator = None # type: Optional[WeightedPauliOperator]
self._ret = {} # type: Dict[str, Any]
self._pauli_list = None # type: Optional[List[List[Union[complex, Pauli]]]]
self._phase_estimation_circuit = None
self._slice_pauli_list = None # type: Optional[List[List[Union[complex, Pauli]]]]
self._setup(operator)
def _setup(self, operator: Optional[Union[OperatorBase, LegacyBaseOperator]]) -> None:
self._operator = None
self._ret = {}
self._pauli_list = None
self._phase_estimation_circuit = None
self._slice_pauli_list = None
if operator:
# Convert to Legacy Operator if Operator flow passed in
if isinstance(operator, OperatorBase):
operator = operator.to_legacy_op()
self._operator = op_converter.to_weighted_pauli_operator(operator.copy())
self._ret['translation'] = sum([abs(p[0]) for p in self._operator.reorder_paulis()])
self._ret['stretch'] = 0.5 / self._ret['translation']
# translate the operator
self._operator.simplify()
translation_op = WeightedPauliOperator([
[
self._ret['translation'],
Pauli(
(np.zeros(self._operator.num_qubits),
np.zeros(self._operator.num_qubits))
)
]
])
translation_op.simplify()
self._operator += translation_op
self._pauli_list = self._operator.reorder_paulis()
# stretch the operator
for p in self._pauli_list:
p[0] = p[0] * self._ret['stretch']
if len(self._pauli_list) == 1:
slice_pauli_list = self._pauli_list
else:
if self._expansion_mode == 'trotter':
slice_pauli_list = self._pauli_list
else:
slice_pauli_list = suzuki_expansion_slice_pauli_list(self._pauli_list,
1, self._expansion_order)
self._slice_pauli_list = slice_pauli_list
@property
def operator(self) -> Optional[Union[OperatorBase, LegacyBaseOperator]]:
""" Returns operator """
return self._in_operator
@operator.setter
def operator(self, operator: Union[OperatorBase, LegacyBaseOperator]) -> None:
""" set operator """
self._in_operator = operator
self._setup(operator)
@property
def aux_operators(self) -> Optional[List[Union[OperatorBase, LegacyBaseOperator]]]:
""" Returns aux operators """
raise TypeError('aux_operators not supported.')
@aux_operators.setter
def aux_operators(self,
aux_operators: Optional[List[Union[OperatorBase, LegacyBaseOperator]]]
) -> None:
""" Set aux operators """
raise TypeError('aux_operators not supported.')
def construct_circuit(self,
k: Optional[int] = None,
omega: float = 0,
measurement: bool = False) -> QuantumCircuit:
"""Construct the kth iteration Quantum Phase Estimation circuit.
For details of parameters, please see Fig. 2 in https://arxiv.org/pdf/quant-ph/0610214.pdf.
Args:
k: the iteration idx.
omega: the feedback angle.
measurement: Boolean flag to indicate if measurement should
be included in the circuit.
Returns:
QuantumCircuit: the quantum circuit per iteration
"""
if self._operator is None or self._state_in is None:
return None
k = self._num_iterations if k is None else k
a = QuantumRegister(1, name='a')
q = QuantumRegister(self._operator.num_qubits, name='q')
self._ancillary_register = a
self._state_register = q
qc = QuantumCircuit(q)
if isinstance(self._state_in, QuantumCircuit):
qc.append(self._state_in, q)
else:
qc += self._state_in.construct_circuit('circuit', q)
# hadamard on a[0]
qc.add_register(a)
qc.h(a[0])
# controlled-U
qc_evolutions_inst = evolution_instruction(self._slice_pauli_list, -2 * np.pi,
self._num_time_slices,
controlled=True, power=2 ** (k - 1),
shallow_slicing=self._shallow_circuit_concat)
if self._shallow_circuit_concat:
qc_evolutions = QuantumCircuit(q, a)
qc_evolutions.append(qc_evolutions_inst, list(q) + [a[0]])
qc.data += qc_evolutions.data
else:
qc.append(qc_evolutions_inst, list(q) + [a[0]])
# global phase due to identity pauli
qc.p(2 * np.pi * self._ancilla_phase_coef * (2 ** (k - 1)), a[0])
# rz on a[0]
qc.p(omega, a[0])
# hadamard on a[0]
qc.h(a[0])
if measurement:
c = ClassicalRegister(1, name='c')
qc.add_register(c)
# qc.barrier(self._ancillary_register)
qc.measure(self._ancillary_register, c)
return qc
def compute_minimum_eigenvalue(
self,
operator: Optional[Union[OperatorBase, LegacyBaseOperator]] = None,
aux_operators: Optional[List[Union[OperatorBase, LegacyBaseOperator]]] = None
) -> MinimumEigensolverResult:
super().compute_minimum_eigenvalue(operator, aux_operators)
return self._run()
def _estimate_phase_iteratively(self):
"""
Iteratively construct the different order of controlled evolution
circuit to carry out phase estimation.
"""
self._ret['top_measurement_label'] = ''
omega_coef = 0
# k runs from the number of iterations back to 1
for k in range(self._num_iterations, 0, -1):
omega_coef /= 2
if self._quantum_instance.is_statevector:
qc = self.construct_circuit(k, -2 * np.pi * omega_coef, measurement=False)
result = self._quantum_instance.execute(qc)
complete_state_vec = result.get_statevector(qc)
ancilla_density_mat = get_subsystem_density_matrix(
complete_state_vec,
range(self._operator.num_qubits)
)
ancilla_density_mat_diag = np.diag(ancilla_density_mat)
max_amplitude = max(ancilla_density_mat_diag.min(),
ancilla_density_mat_diag.max(), key=abs)
x = np.where(ancilla_density_mat_diag == max_amplitude)[0][0]
else:
qc = self.construct_circuit(k, -2 * np.pi * omega_coef, measurement=True)
measurements = self._quantum_instance.execute(qc).get_counts(qc)
if '0' not in measurements:
if '1' in measurements:
x = 1
else:
raise RuntimeError('Unexpected measurement {}.'.format(measurements))
else:
if '1' not in measurements:
x = 0
else:
x = 1 if measurements['1'] > measurements['0'] else 0
self._ret['top_measurement_label'] = \
'{}{}'.format(x, self._ret['top_measurement_label'])
omega_coef = omega_coef + x / 2
logger.info('Reverse iteration %s of %s with measured bit %s',
k, self._num_iterations, x)
return omega_coef
def _compute_energy(self):
# check for identify paulis to get its coef for applying global phase shift on ancilla later
num_identities = 0
self._pauli_list = self._operator.reorder_paulis()
for p in self._pauli_list:
if np.all(np.logical_not(p[1].z)) and np.all(np.logical_not(p[1].x)):
num_identities += 1
if num_identities > 1:
raise RuntimeError('Multiple identity pauli terms are present.')
self._ancilla_phase_coef = p[0].real if isinstance(p[0], complex) else p[0]
self._ret['phase'] = self._estimate_phase_iteratively()
self._ret['top_measurement_decimal'] = sum([t[0] * t[1] for t in zip(
[1 / 2 ** p for p in range(1, self._num_iterations + 1)],
[int(n) for n in self._ret['top_measurement_label']]
)])
self._ret['energy'] = self._ret['phase'] / self._ret['stretch'] - self._ret['translation']
def _run(self) -> 'IQPEResult':
self._compute_energy()
result = IQPEResult()
if 'translation' in self._ret:
result.translation = self._ret['translation']
if 'stretch' in self._ret:
result.stretch = self._ret['stretch']
if 'top_measurement_label' in self._ret:
result.top_measurement_label = self._ret['top_measurement_label']
if 'top_measurement_decimal' in self._ret:
result.top_measurement_decimal = self._ret['top_measurement_decimal']
if 'energy' in self._ret:
result.eigenvalue = complex(self._ret['energy'])
if 'phase' in self._ret:
result.phase = self._ret['phase']
return result
class IQPEResult(QPEResult):
""" IQPE Result."""
@property
def phase(self) -> float:
""" Returns phase """
return self.get('phase')
@phase.setter
def phase(self, value: float) -> None:
""" Sets phase """
self.data['phase'] = value
@staticmethod
def from_dict(a_dict: Dict) -> 'IQPEResult':
""" create new object from a dictionary """
return IQPEResult(a_dict)
| [
"qiskit.aqua.utils.validation.validate_min",
"qiskit.QuantumCircuit",
"qiskit.aqua.operators.evolution_instruction",
"numpy.logical_not",
"numpy.zeros",
"qiskit.aqua.operators.suzuki_expansion_slice_pauli_list",
"numpy.where",
"qiskit.aqua.utils.validation.validate_in_set",
"qiskit.ClassicalRegister... | [((1532, 1559), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1549, 1559), False, 'import logging\n'), ((3508, 3559), 'qiskit.aqua.utils.validation.validate_min', 'validate_min', (['"""num_time_slices"""', 'num_time_slices', '(1)'], {}), "('num_time_slices', num_time_slices, 1)\n", (3520, 3559), False, 'from qiskit.aqua.utils.validation import validate_min, validate_in_set\n'), ((3568, 3617), 'qiskit.aqua.utils.validation.validate_min', 'validate_min', (['"""num_iterations"""', 'num_iterations', '(1)'], {}), "('num_iterations', num_iterations, 1)\n", (3580, 3617), False, 'from qiskit.aqua.utils.validation import validate_min, validate_in_set\n'), ((3626, 3698), 'qiskit.aqua.utils.validation.validate_in_set', 'validate_in_set', (['"""expansion_mode"""', 'expansion_mode', "{'trotter', 'suzuki'}"], {}), "('expansion_mode', expansion_mode, {'trotter', 'suzuki'})\n", (3641, 3698), False, 'from qiskit.aqua.utils.validation import validate_min, validate_in_set\n'), ((3707, 3758), 'qiskit.aqua.utils.validation.validate_min', 'validate_min', (['"""expansion_order"""', 'expansion_order', '(1)'], {}), "('expansion_order', expansion_order, 1)\n", (3719, 3758), False, 'from qiskit.aqua.utils.validation import validate_min, validate_in_set\n'), ((8139, 8167), 'qiskit.QuantumRegister', 'QuantumRegister', (['(1)'], {'name': '"""a"""'}), "(1, name='a')\n", (8154, 8167), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((8180, 8232), 'qiskit.QuantumRegister', 'QuantumRegister', (['self._operator.num_qubits'], {'name': '"""q"""'}), "(self._operator.num_qubits, name='q')\n", (8195, 8232), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((8316, 8333), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['q'], {}), '(q)\n', (8330, 8333), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((8634, 8807), 
'qiskit.aqua.operators.evolution_instruction', 'evolution_instruction', (['self._slice_pauli_list', '(-2 * np.pi)', 'self._num_time_slices'], {'controlled': '(True)', 'power': '(2 ** (k - 1))', 'shallow_slicing': 'self._shallow_circuit_concat'}), '(self._slice_pauli_list, -2 * np.pi, self.\n _num_time_slices, controlled=True, power=2 ** (k - 1), shallow_slicing=\n self._shallow_circuit_concat)\n', (8655, 8807), False, 'from qiskit.aqua.operators import WeightedPauliOperator, suzuki_expansion_slice_pauli_list, evolution_instruction\n'), ((9020, 9040), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['q', 'a'], {}), '(q, a)\n', (9034, 9040), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((9480, 9510), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['(1)'], {'name': '"""c"""'}), "(1, name='c')\n", (9497, 9510), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((10908, 10936), 'numpy.diag', 'np.diag', (['ancilla_density_mat'], {}), '(ancilla_density_mat)\n', (10915, 10936), True, 'import numpy as np\n'), ((6282, 6359), 'qiskit.aqua.operators.suzuki_expansion_slice_pauli_list', 'suzuki_expansion_slice_pauli_list', (['self._pauli_list', '(1)', 'self._expansion_order'], {}), '(self._pauli_list, 1, self._expansion_order)\n', (6315, 6359), False, 'from qiskit.aqua.operators import WeightedPauliOperator, suzuki_expansion_slice_pauli_list, evolution_instruction\n'), ((12385, 12407), 'numpy.logical_not', 'np.logical_not', (['p[1].z'], {}), '(p[1].z)\n', (12399, 12407), True, 'import numpy as np\n'), ((12420, 12442), 'numpy.logical_not', 'np.logical_not', (['p[1].x'], {}), '(p[1].x)\n', (12434, 12442), True, 'import numpy as np\n'), ((11102, 11153), 'numpy.where', 'np.where', (['(ancilla_density_mat_diag == max_amplitude)'], {}), '(ancilla_density_mat_diag == max_amplitude)\n', (11110, 11153), True, 'import numpy as np\n'), ((5571, 5606), 'numpy.zeros', 'np.zeros', (['self._operator.num_qubits'], {}), 
'(self._operator.num_qubits)\n', (5579, 5606), True, 'import numpy as np\n'), ((5633, 5668), 'numpy.zeros', 'np.zeros', (['self._operator.num_qubits'], {}), '(self._operator.num_qubits)\n', (5641, 5668), True, 'import numpy as np\n')] |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Integration tests for the frontend engine.py module with the backends"""
import os
import numbers
import pytest
import numpy as np
import strawberryfields as sf
from strawberryfields import ops
from strawberryfields.backends import BaseFock, FockBackend, GaussianBackend
from strawberryfields.backends.states import BaseState
try:
from strawberryfields.backends.tfbackend import TFBackend
import tensorflow as tf
except (ImportError, ValueError):
eng_backend_params = [("gaussian", GaussianBackend), ("fock", FockBackend)]
else:
eng_backend_params = [
("tf", TFBackend),
("gaussian", GaussianBackend),
("fock", FockBackend),
]
# make test deterministic
np.random.seed(42)
a = 0.1234
b = -0.543
c = 0.312
@pytest.mark.parametrize("name,expected", eng_backend_params)
def test_load_backend(name, expected, cutoff):
"""Test backends can be correctly loaded via strings"""
eng = sf.Engine(name)
assert isinstance(eng.backend, expected)
class TestEngineReset:
"""Test engine reset functionality"""
def test_init_vacuum(self, setup_eng, tol):
"""Test that the engine is initialized to the vacuum state"""
eng, prog = setup_eng(2)
eng.run(prog) # run an empty program
assert np.all(eng.backend.is_vacuum(tol))
def test_reset_vacuum(self, setup_eng, tol):
"""Test that resetting the engine returns to the vacuum state"""
eng, prog = setup_eng(2)
with prog.context:
ops.Dgate(0.5, 0.0) | 0
eng.run(prog)
assert not np.all(eng.backend.is_vacuum(tol))
eng.reset()
assert np.all(eng.backend.is_vacuum(tol))
@pytest.mark.backends("fock")
def test_eng_reset(self, setup_eng, cutoff):
"""Test the Engine.reset() features."""
eng, prog = setup_eng(2)
state = eng.run(prog).state
backend_cutoff = eng.backend.get_cutoff_dim()
assert state._cutoff == backend_cutoff
assert cutoff == backend_cutoff
# change the cutoff dimension
new_cutoff = cutoff + 1
eng.reset({"cutoff_dim": new_cutoff})
state = eng.run(prog).state
backend_cutoff = eng.backend.get_cutoff_dim()
assert state._cutoff == backend_cutoff
assert new_cutoff == backend_cutoff
class TestProperExecution:
"""Test that various frontend circuits execute through
the backend with no error"""
def test_no_return_state(self, setup_eng):
"""Engine returns no state object when none is requested."""
eng, prog = setup_eng(2)
res = eng.run(prog, modes=[])
assert res.state is None
def test_return_state(self, setup_eng):
"""Engine returns a valid state object."""
eng, prog = setup_eng(2)
res = eng.run(prog)
assert isinstance(res.state, BaseState)
def test_return_samples(self, setup_eng):
"""Engine returns measurement samples."""
eng, prog = setup_eng(2)
with prog.context as q:
ops.MeasureX | q[0]
res = eng.run(prog)
# one entry for each measured mode
assert len(res.samples[0]) == 1
# the same samples can also be found in the regrefs
assert np.equal(
[r.val for r in prog.register if r.val is not None], np.ravel(res.samples)
).all()
# first mode was measured
if eng.backend_name == "tf":
assert isinstance(res.samples[0][0], tf.Tensor)
else:
assert isinstance(res.samples[0], (numbers.Number, np.ndarray))
# second mode was not measured
assert prog.register[1].val is None
@pytest.mark.backends("bosonic")
def test_return_ancillae_samples(self, setup_eng):
"""Engine returns measurement samples from ancillary states
used for measurement-based gates."""
eng, prog = setup_eng(1)
with prog.context as q:
ops.MSgate(1, avg=False) | q
ops.MSgate(1, avg=False) | q
res = eng.run(prog)
assert isinstance(eng, sf.engine.BosonicEngine)
assert len(res.ancillae_samples[0]) == 2
assert str(res) == "<Result: shots=0, num_modes=0, num_ancillae=2, contains state=True>"
# TODO: Some of these tests should probably check *something* after execution
def test_measured_parameter(self, setup_eng):
"""Test that a circuit with measured parameters executes successfully."""
eng, prog = setup_eng(2)
with prog.context as q:
ops.MeasureX | q[0]
ops.Sgate(q[0].par) | q[1]
# symbolic hermitian conjugate together with register reference
ops.Dgate(q[0].par, 0).H | q[1]
# algebraic transformation
ops.Sgate(q[0].par ** 2) | q[1]
# algebraic transformation and h.c.
ops.Dgate(q[0].par, np.pi).H | q[1]
eng.run(prog)
@pytest.mark.backends("tf")
@pytest.mark.skipif(
"BATCHED" not in os.environ, reason="Test for when combining batched samples"
)
def test_combine_batched_samples(self, batch_size, setup_eng):
"""Test that batched samples are forwarded to ``Result.combine_samples`` correctly"""
eng, prog = setup_eng(4)
with prog.context as q:
ops.MeasureFock() | (q[0], q[2])
ops.MeasureX | q[1]
samples = eng.run(prog).samples
# check the shape; should be (batches, shots, measured_modes)
if batch_size:
assert samples.shape == (batch_size, 1, 3)
for batch in samples:
# check that MesureFock measures `0` while MeasureX does NOT measure `0`.
correct_samples = [0, 1, 0]
assert [bool(i) for i in batch[0]] == correct_samples
else:
assert samples.shape == (1, 3)
def test_homodyne_measurement_vacuum(self, setup_eng, tol):
"""MeasureX and MeasureP leave the mode in the vacuum state"""
eng, prog = setup_eng(2)
with prog.context as q:
ops.Coherent(a, c) | q[0]
ops.Coherent(b, c) | q[1]
ops.MeasureX | q[0]
ops.MeasureP | q[1]
eng.run(prog)
assert np.all(eng.backend.is_vacuum(tol))
def test_homodyne_measurement_vacuum_phi(self, setup_eng, tol):
"""Homodyne measurements leave the mode in the vacuum state"""
eng, prog = setup_eng(2)
with prog.context as q:
ops.Coherent(a, b) | q[0]
ops.MeasureHomodyne(c) | q[0]
eng.run(prog)
assert np.all(eng.backend.is_vacuum(tol))
def test_program_subroutine(self, setup_eng, tol):
"""Simple quantum program with a subroutine and references."""
eng, prog = setup_eng(2)
# define some gates
D = ops.Dgate(0.5, 0.0)
BS = ops.BSgate(0.7 * np.pi, np.pi / 2)
R = ops.Rgate(np.pi / 3)
def subroutine(a, b):
"""Subroutine for the quantum program"""
R | a
BS | (a, b)
R.H | a
# main program
with prog.context as q:
# get register references
alice, bob = q
ops.All(ops.Vacuum()) | (alice, bob)
D | alice
subroutine(alice, bob)
BS | (alice, bob)
subroutine(bob, alice)
state = eng.run(prog).state
# state norm must be invariant
if isinstance(eng.backend, BaseFock):
assert np.allclose(state.trace(), 1, atol=tol, rtol=0)
def test_subsystems(self, setup_eng, tol):
"""Check that the backend keeps in sync with the program when creating and deleting modes."""
null = sf.Program(2) # empty program
eng, prog = setup_eng(2)
# define some gates
D = ops.Dgate(0.5, 0.0)
BS = ops.BSgate(2 * np.pi, np.pi / 2)
R = ops.Rgate(np.pi)
with prog.context as q:
alice, bob = q
D | alice
BS | (alice, bob)
ops.Del | alice
R | bob
(charlie,) = ops.New(1)
BS | (bob, charlie)
ops.MeasureX | bob
ops.Del | bob
D.H | charlie
ops.MeasureX | charlie
def check_reg(p, expected_n=None):
"""Compare Program.register with the mode list returned by the backend.
They should always be in agreement after Engine.run() and Engine.reset().
"""
rr = p.register
modes = eng.backend.get_modes()
# number of elements
assert len(rr) == len(modes)
if expected_n is not None:
assert len(rr) == expected_n
# check indices match
assert np.all([r.ind for r in rr] == modes)
# activity
assert np.all([r.active for r in rr])
state = eng.run(null)
check_reg(null, 2)
state = eng.run(prog).state
check_reg(prog, 1)
# state norm must be invariant
if isinstance(eng.backend, BaseFock):
assert np.allclose(state.trace(), 1, atol=tol, rtol=0)
# check that reset() works
eng.reset()
# the regrefs are reset as well
assert np.all([r.val is None for r in prog.register])
def test_empty_program(self, setup_eng):
"""Empty programs do not change the state of the backend."""
eng, p1 = setup_eng(2)
a = 0.23
r = 0.1
with p1.context as q:
ops.Dgate(a, 0.0) | q[0]
ops.Sgate(r) | q[1]
state1 = eng.run(p1).state
# empty program
p2 = sf.Program(p1)
state2 = eng.run(p2).state
assert state1 == state2
p3 = sf.Program(p2)
with p3.context as q:
ops.Rgate(r) | q[0]
state3 = eng.run(p3).state
assert not state1 == state3
state4 = eng.run(p2).state
assert state3 == state4
# TODO: when ``shots`` is incorporated into other backends, unmark this test
@pytest.mark.backends("gaussian")
def test_measurefock_shots(self, setup_eng):
"""Tests that passing shots with a program containing MeasureFock
returns a result whose entries have the right shapes and values"""
shots = 5
expected = np.zeros(dtype=int, shape=(shots,))
# all modes
eng, p1 = setup_eng(3)
with p1.context as q:
ops.MeasureFock() | q
samples = eng.run(p1, shots=shots).samples.astype(int)
assert samples.shape == (shots, 3)
assert all(samples[:, 0] == expected)
assert all(samples[:, 1] == expected)
assert all(samples[:, 2] == expected)
# some modes
eng, p2 = setup_eng(3)
with p2.context as q:
ops.MeasureFock() | (q[0], q[2])
samples = eng.run(p2, shots=shots).samples
assert samples.shape == (shots, 2)
assert all(samples[:, 0].astype(int) == expected)
assert all(samples[:, 1].astype(int) == expected)
# one mode
eng, p3 = setup_eng(3)
with p3.context as q:
ops.MeasureFock() | q[0]
samples = eng.run(p3, shots=shots).samples
assert samples.shape == (shots, 1)
assert all(samples[:, 0].astype(int) == expected)
# TODO: when ``shots`` is incorporated into other backends, delete this test
@pytest.mark.backends("tf", "fock")
@pytest.mark.skipif("BATCHED" in os.environ, reason="Test only runs for non-batched backends")
def test_measurefock_shots_exception(self, setup_eng):
shots = 5
eng, p1 = setup_eng(3)
with p1.context as q:
ops.MeasureFock() | q
backend_name = eng.backend.__str__()
with pytest.raises(
NotImplementedError,
match=r"""(Measure|MeasureFock) has not been implemented in {} """
"""for the arguments {{'shots': {}}}""".format(backend_name, shots),
):
eng.run(p1, shots=shots).samples
| [
"strawberryfields.Engine",
"numpy.random.seed",
"numpy.ravel",
"pytest.mark.skipif",
"strawberryfields.ops.MSgate",
"pytest.mark.parametrize",
"strawberryfields.ops.BSgate",
"strawberryfields.ops.Coherent",
"strawberryfields.ops.MeasureHomodyne",
"strawberryfields.Program",
"strawberryfields.ops... | [((1300, 1318), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1314, 1318), True, 'import numpy as np\n'), ((1354, 1414), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name,expected"""', 'eng_backend_params'], {}), "('name,expected', eng_backend_params)\n", (1377, 1414), False, 'import pytest\n'), ((1532, 1547), 'strawberryfields.Engine', 'sf.Engine', (['name'], {}), '(name)\n', (1541, 1547), True, 'import strawberryfields as sf\n'), ((2282, 2310), 'pytest.mark.backends', 'pytest.mark.backends', (['"""fock"""'], {}), "('fock')\n", (2302, 2310), False, 'import pytest\n'), ((4269, 4300), 'pytest.mark.backends', 'pytest.mark.backends', (['"""bosonic"""'], {}), "('bosonic')\n", (4289, 4300), False, 'import pytest\n'), ((5529, 5555), 'pytest.mark.backends', 'pytest.mark.backends', (['"""tf"""'], {}), "('tf')\n", (5549, 5555), False, 'import pytest\n'), ((5561, 5663), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('BATCHED' not in os.environ)"], {'reason': '"""Test for when combining batched samples"""'}), "('BATCHED' not in os.environ, reason=\n 'Test for when combining batched samples')\n", (5579, 5663), False, 'import pytest\n'), ((10681, 10713), 'pytest.mark.backends', 'pytest.mark.backends', (['"""gaussian"""'], {}), "('gaussian')\n", (10701, 10713), False, 'import pytest\n'), ((12040, 12074), 'pytest.mark.backends', 'pytest.mark.backends', (['"""tf"""', '"""fock"""'], {}), "('tf', 'fock')\n", (12060, 12074), False, 'import pytest\n'), ((12080, 12178), 'pytest.mark.skipif', 'pytest.mark.skipif', (["('BATCHED' in os.environ)"], {'reason': '"""Test only runs for non-batched backends"""'}), "('BATCHED' in os.environ, reason=\n 'Test only runs for non-batched backends')\n", (12098, 12178), False, 'import pytest\n'), ((7434, 7453), 'strawberryfields.ops.Dgate', 'ops.Dgate', (['(0.5)', '(0.0)'], {}), '(0.5, 0.0)\n', (7443, 7453), False, 'from strawberryfields import ops\n'), ((7467, 7501), 
'strawberryfields.ops.BSgate', 'ops.BSgate', (['(0.7 * np.pi)', '(np.pi / 2)'], {}), '(0.7 * np.pi, np.pi / 2)\n', (7477, 7501), False, 'from strawberryfields import ops\n'), ((7514, 7534), 'strawberryfields.ops.Rgate', 'ops.Rgate', (['(np.pi / 3)'], {}), '(np.pi / 3)\n', (7523, 7534), False, 'from strawberryfields import ops\n'), ((8328, 8341), 'strawberryfields.Program', 'sf.Program', (['(2)'], {}), '(2)\n', (8338, 8341), True, 'import strawberryfields as sf\n'), ((8433, 8452), 'strawberryfields.ops.Dgate', 'ops.Dgate', (['(0.5)', '(0.0)'], {}), '(0.5, 0.0)\n', (8442, 8452), False, 'from strawberryfields import ops\n'), ((8466, 8498), 'strawberryfields.ops.BSgate', 'ops.BSgate', (['(2 * np.pi)', '(np.pi / 2)'], {}), '(2 * np.pi, np.pi / 2)\n', (8476, 8498), False, 'from strawberryfields import ops\n'), ((8511, 8527), 'strawberryfields.ops.Rgate', 'ops.Rgate', (['np.pi'], {}), '(np.pi)\n', (8520, 8527), False, 'from strawberryfields import ops\n'), ((9884, 9932), 'numpy.all', 'np.all', (['[(r.val is None) for r in prog.register]'], {}), '([(r.val is None) for r in prog.register])\n', (9890, 9932), True, 'import numpy as np\n'), ((10282, 10296), 'strawberryfields.Program', 'sf.Program', (['p1'], {}), '(p1)\n', (10292, 10296), True, 'import strawberryfields as sf\n'), ((10378, 10392), 'strawberryfields.Program', 'sf.Program', (['p2'], {}), '(p2)\n', (10388, 10392), True, 'import strawberryfields as sf\n'), ((10949, 10984), 'numpy.zeros', 'np.zeros', ([], {'dtype': 'int', 'shape': '(shots,)'}), '(dtype=int, shape=(shots,))\n', (10957, 10984), True, 'import numpy as np\n'), ((8713, 8723), 'strawberryfields.ops.New', 'ops.New', (['(1)'], {}), '(1)\n', (8720, 8723), False, 'from strawberryfields import ops\n'), ((9389, 9425), 'numpy.all', 'np.all', (['([r.ind for r in rr] == modes)'], {}), '([r.ind for r in rr] == modes)\n', (9395, 9425), True, 'import numpy as np\n'), ((9468, 9498), 'numpy.all', 'np.all', (['[r.active for r in rr]'], {}), '([r.active for r in rr])\n', 
(9474, 9498), True, 'import numpy as np\n'), ((2104, 2123), 'strawberryfields.ops.Dgate', 'ops.Dgate', (['(0.5)', '(0.0)'], {}), '(0.5, 0.0)\n', (2113, 2123), False, 'from strawberryfields import ops\n'), ((4547, 4571), 'strawberryfields.ops.MSgate', 'ops.MSgate', (['(1)'], {'avg': '(False)'}), '(1, avg=False)\n', (4557, 4571), False, 'from strawberryfields import ops\n'), ((4588, 4612), 'strawberryfields.ops.MSgate', 'ops.MSgate', (['(1)'], {'avg': '(False)'}), '(1, avg=False)\n', (4598, 4612), False, 'from strawberryfields import ops\n'), ((5174, 5193), 'strawberryfields.ops.Sgate', 'ops.Sgate', (['q[0].par'], {}), '(q[0].par)\n', (5183, 5193), False, 'from strawberryfields import ops\n'), ((5372, 5396), 'strawberryfields.ops.Sgate', 'ops.Sgate', (['(q[0].par ** 2)'], {}), '(q[0].par ** 2)\n', (5381, 5396), False, 'from strawberryfields import ops\n'), ((5911, 5928), 'strawberryfields.ops.MeasureFock', 'ops.MeasureFock', ([], {}), '()\n', (5926, 5928), False, 'from strawberryfields import ops\n'), ((6674, 6692), 'strawberryfields.ops.Coherent', 'ops.Coherent', (['a', 'c'], {}), '(a, c)\n', (6686, 6692), False, 'from strawberryfields import ops\n'), ((6712, 6730), 'strawberryfields.ops.Coherent', 'ops.Coherent', (['b', 'c'], {}), '(b, c)\n', (6724, 6730), False, 'from strawberryfields import ops\n'), ((7092, 7110), 'strawberryfields.ops.Coherent', 'ops.Coherent', (['a', 'b'], {}), '(a, b)\n', (7104, 7110), False, 'from strawberryfields import ops\n'), ((7130, 7152), 'strawberryfields.ops.MeasureHomodyne', 'ops.MeasureHomodyne', (['c'], {}), '(c)\n', (7149, 7152), False, 'from strawberryfields import ops\n'), ((10152, 10169), 'strawberryfields.ops.Dgate', 'ops.Dgate', (['a', '(0.0)'], {}), '(a, 0.0)\n', (10161, 10169), False, 'from strawberryfields import ops\n'), ((10189, 10201), 'strawberryfields.ops.Sgate', 'ops.Sgate', (['r'], {}), '(r)\n', (10198, 10201), False, 'from strawberryfields import ops\n'), ((10435, 10447), 'strawberryfields.ops.Rgate', 'ops.Rgate', 
(['r'], {}), '(r)\n', (10444, 10447), False, 'from strawberryfields import ops\n'), ((11079, 11096), 'strawberryfields.ops.MeasureFock', 'ops.MeasureFock', ([], {}), '()\n', (11094, 11096), False, 'from strawberryfields import ops\n'), ((11440, 11457), 'strawberryfields.ops.MeasureFock', 'ops.MeasureFock', ([], {}), '()\n', (11455, 11457), False, 'from strawberryfields import ops\n'), ((11776, 11793), 'strawberryfields.ops.MeasureFock', 'ops.MeasureFock', ([], {}), '()\n', (11791, 11793), False, 'from strawberryfields import ops\n'), ((12324, 12341), 'strawberryfields.ops.MeasureFock', 'ops.MeasureFock', ([], {}), '()\n', (12339, 12341), False, 'from strawberryfields import ops\n'), ((3921, 3942), 'numpy.ravel', 'np.ravel', (['res.samples'], {}), '(res.samples)\n', (3929, 3942), True, 'import numpy as np\n'), ((5289, 5311), 'strawberryfields.ops.Dgate', 'ops.Dgate', (['q[0].par', '(0)'], {}), '(q[0].par, 0)\n', (5298, 5311), False, 'from strawberryfields import ops\n'), ((5464, 5490), 'strawberryfields.ops.Dgate', 'ops.Dgate', (['q[0].par', 'np.pi'], {}), '(q[0].par, np.pi)\n', (5473, 5490), False, 'from strawberryfields import ops\n'), ((7822, 7834), 'strawberryfields.ops.Vacuum', 'ops.Vacuum', ([], {}), '()\n', (7832, 7834), False, 'from strawberryfields import ops\n')] |
"""Desk environment with Franka Panda arm."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from dm_control import mujoco
from dm_control.utils import inverse_kinematics
import gym
import numpy as np
from PIL import Image
class RoboDesk(gym.Env):
  """Multi-task manipulation environment with a Franka Panda arm.

  Exposes a 5-D continuous action space (end-effector delta x/y/z, wrist
  rotation, gripper) and dictionary observations containing an RGB image
  plus proprioceptive and object state.
  """

  def __init__(self, task='open_slide', reward='dense', action_repeat=1,
               episode_length=500, image_size=64):
    """Creates the environment.

    Args:
      task: Name of the task; one of the keys of ``self.reward_functions``.
      reward: One of 'dense', 'sparse' or 'success'.
      action_repeat: Number of macro simulation steps per ``step`` call.
      episode_length: Episode length in environment steps; falsy disables
        the episode limit.
      image_size: Height and width of the rendered observation image.
    """
    assert reward in ('dense', 'sparse', 'success'), reward
    model_path = os.path.join(os.path.dirname(__file__), 'assets/desk.xml')
    self.physics = mujoco.Physics.from_xml_path(model_path)
    # Copy shares the compiled model but has its own data, so running the
    # IK solver on it never perturbs the simulated state.
    self.physics_copy = self.physics.copy(share_model=True)
    self.physics_copy.data.qpos[:] = self.physics.data.qpos[:]

    # Robot constants
    self.num_joints = 9
    self.joint_bounds = self.physics.model.actuator_ctrlrange.copy()

    # Environment params
    self.image_size = image_size
    self.action_dim = 5
    self.reward = reward
    self.success = None

    # Scaling factors mapping [-1, 1] actions to physical deltas.
    self.end_effector_scale = 0.01
    self.wrist_scale = 0.02
    self.joint_scale = 0.02

    # Episode length
    self.action_repeat = action_repeat
    self.num_steps = 0
    self.episode_length = episode_length

    self.original_pos = {}
    self.previous_z_angle = None
    self.total_rotation = 0

    # pylint: disable=g-long-lambda
    self.reward_functions = {
        # Core tasks
        'open_slide': self._slide_reward,
        'open_drawer': self._drawer_reward,
        'push_green': (lambda reward_type: self._button_reward(
            'green', reward_type)),
        'stack': self._stack_reward,
        'upright_block_off_table': (lambda reward_type: self._push_off_table(
            'upright_block', reward_type)),
        'flat_block_in_bin': (lambda reward_type: self._put_in_bin(
            'flat_block', reward_type)),
        'flat_block_in_shelf': (lambda reward_type: self._put_in_shelf(
            'flat_block', reward_type)),
        'lift_upright_block': (lambda reward_type: self._lift_block(
            'upright_block', reward_type)),
        'lift_ball': (lambda reward_type: self._lift_block(
            'ball', reward_type)),
        # Extra tasks
        'push_blue': (lambda reward_type: self._button_reward(
            'blue', reward_type)),
        'push_red': (lambda reward_type: self._button_reward(
            'red', reward_type)),
        'flat_block_off_table': (lambda reward_type: self._push_off_table(
            'flat_block', reward_type)),
        'ball_off_table': (lambda reward_type: self._push_off_table(
            'ball', reward_type)),
        'upright_block_in_bin': (lambda reward_type: self._put_in_bin(
            'upright_block', reward_type)),
        'ball_in_bin': (lambda reward_type: self._put_in_bin(
            'ball', reward_type)),
        'upright_block_in_shelf': (lambda reward_type: self._put_in_shelf(
            'upright_block', reward_type)),
        'ball_in_shelf': (lambda reward_type: self._put_in_shelf(
            'ball', reward_type)),
        'lift_flat_block': (lambda reward_type: self._lift_block(
            'flat_block', reward_type)),
    }
    self.core_tasks = list(self.reward_functions)[0:12]
    self.all_tasks = list(self.reward_functions)
    self.task = task
    # pylint: enable=g-long-lambda

  @property
  def action_space(self):
    """Continuous actions in [-1, 1]^action_dim."""
    return gym.spaces.Box(-np.ones(self.action_dim), np.ones(self.action_dim))

  @property
  def observation_space(self):
    """Dict observation space matching the keys returned by ``_get_obs``."""
    spaces = {
        'image': gym.spaces.Box(
            0, 255, (self.image_size, self.image_size, 3), np.uint8),
        'qpos_robot': gym.spaces.Box(self.joint_bounds[:, 0],
                                   self.joint_bounds[:, 1]),
        'qvel_robot': gym.spaces.Box(-np.inf, np.inf, (9,), np.float32),
        'end_effector': gym.spaces.Box(-np.inf, np.inf, (3,), np.float32),
        'qpos_objects': gym.spaces.Box(-np.inf, np.inf, (26,), np.float32),
        'qvel_objects': gym.spaces.Box(-np.inf, np.inf, (26,), np.float32)}
    return gym.spaces.Dict(spaces)

  def render(self, mode='rgb_array', resize=True):
    """Renders the scene from a fixed overhead camera.

    Args:
      mode: Unused; kept for gym API compatibility.
      resize: If True, crop and resize the image to ``image_size``.

    Returns:
      An RGB image as a numpy array.
    """
    params = {'distance': 1.8, 'azimuth': 90, 'elevation': -60,
              'crop_box': (16.75, 25.0, 105.0, 88.75), 'size': 120}
    camera = mujoco.Camera(
        physics=self.physics, height=params['size'],
        width=params['size'], camera_id=-1)
    camera._render_camera.distance = params['distance']  # pylint: disable=protected-access
    camera._render_camera.azimuth = params['azimuth']  # pylint: disable=protected-access
    camera._render_camera.elevation = params['elevation']  # pylint: disable=protected-access
    camera._render_camera.lookat[:] = [0, 0.535, 1.1]  # pylint: disable=protected-access

    image = camera.render(depth=False, segmentation=False)
    camera._scene.free()  # pylint: disable=protected-access

    if resize:
      image = Image.fromarray(image).crop(box=params['crop_box'])
      # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the identical
      # filter (ANTIALIAS has been an alias of LANCZOS since Pillow 2.7).
      image = image.resize([self.image_size, self.image_size],
                           resample=Image.LANCZOS)
      image = np.asarray(image)
    return image

  def _ik(self, pos):
    """Solves inverse kinematics for the end effector at ``pos``.

    Runs on ``physics_copy`` so the solver does not disturb the simulation.
    """
    out = inverse_kinematics.qpos_from_site_pose(
        self.physics_copy, 'end_effector', pos,
        joint_names=('panda0_joint1', 'panda0_joint2', 'panda0_joint3',
                     'panda0_joint4', 'panda0_joint5', 'panda0_joint6'),
        inplace=True)
    return out.qpos[:]

  def _action_to_delta_joint(self, unscaled_value, joint_bounds):
    """Convert actions from [-1, 1] range to joint bounds."""
    joint_range = joint_bounds[1] - joint_bounds[0]
    return (((unscaled_value + 1) * joint_range) / 2) + joint_bounds[0]

  def _convert_action(self, full_action):
    """Converts action from [-1, 1] space to desired joint position."""
    full_action = np.array(full_action)
    delta_action = full_action[0:3] * self.end_effector_scale
    position = (
        self.physics.named.data.site_xpos['end_effector'] + delta_action)
    joint = self._ik(position)
    # Wrist joint (index 6) is driven incrementally and clipped to bounds.
    delta_wrist = self._action_to_delta_joint(full_action[3],
                                              self.joint_bounds[6])
    joint[6] = ((self.wrist_scale * delta_wrist) +
                self.physics.named.data.qpos[6])
    joint[6] = np.clip(joint[6], self.joint_bounds[6][0],
                       self.joint_bounds[6][1])
    # Both gripper fingers (indices 7 and 8) receive the same target.
    joint[7] = self._action_to_delta_joint(full_action[4],
                                           self.joint_bounds[7])
    joint[8] = joint[7]
    return joint

  def step(self, action):
    """Applies ``action`` for ``action_repeat`` macro-steps.

    Returns:
      Tuple of (observation dict, total reward, done flag, info dict).
    """
    total_reward = 0
    for _ in range(self.action_repeat):
      joint_position = self._convert_action(action)
      for _ in range(10):
        self.physics.data.ctrl[0:9] = joint_position[0:9]
        # Ensure gravity compensation stays enabled.
        self.physics.data.qfrc_applied[0:9] = self.physics.data.qfrc_bias[0:9]
        self.physics.step()
      self.physics_copy.data.qpos[:] = self.physics.data.qpos[:]

      if self.reward == 'dense':
        total_reward += self._get_task_reward(self.task, 'dense_reward')
      elif self.reward == 'sparse':
        total_reward += float(self._get_task_reward(self.task, 'success'))
      elif self.reward == 'success':
        if self.success:
          total_reward += 0  # Only give reward once in case episode continues.
        else:
          self.success = self._get_task_reward(self.task, 'success')
          total_reward += float(self.success)
      else:
        raise ValueError(self.reward)

    self.num_steps += self.action_repeat
    if self.episode_length and self.num_steps >= self.episode_length:
      done = True
    else:
      done = False
    return self._get_obs(), total_reward, done, {'discount': 1.0}

  def _get_init_robot_pos(self):
    """Returns a randomized initial joint configuration for the arm."""
    init_joint_pose = np.array(
        [-0.30, -0.4, 0.28, -2.5, 0.13, 1.87, 0.91, 0.01, 0.01])
    init_joint_pose += 0.15 * np.random.uniform(
        low=self.physics.model.actuator_ctrlrange[:self.num_joints, 0],
        high=self.physics.model.actuator_ctrlrange[:self.num_joints, 1])
    return init_joint_pose

  def reset(self):
    """Resets environment."""
    self.success = False
    self.num_steps = 0
    self.physics.reset()

    # Randomize object positions.
    self.physics.named.data.qpos['drawer_joint'] -= 0.10 * np.random.random()
    self.physics.named.data.qpos['slide_joint'] += 0.20 * np.random.random()
    self.physics.named.data.qpos['flat_block'][0] += 0.3 * np.random.random()
    self.physics.named.data.qpos['flat_block'][1] += 0.07 * np.random.random()
    self.physics.named.data.qpos['ball'][0] += 0.48 * np.random.random()
    self.physics.named.data.qpos['ball'][1] += 0.08 * np.random.random()
    self.physics.named.data.qpos['upright_block'][0] += (
        0.3 * np.random.random() + 0.05)
    self.physics.named.data.qpos['upright_block'][1] += (
        0.05 * np.random.random())

    # Set robot position.
    self.physics.data.qpos[:self.num_joints] = self._get_init_robot_pos()
    self.physics.data.qvel[:self.num_joints] = np.zeros(9)

    # Relax object intersections.
    self.physics.forward()
    # Copy physics state into IK simulation.
    self.physics_copy.data.qpos[:] = self.physics.data.qpos[:]

    # Remember starting object positions for movement-based rewards.
    self.original_pos['ball'] = self.physics.named.data.xpos['ball']
    self.original_pos['upright_block'] = self.physics.named.data.xpos[
        'upright_block']
    self.original_pos['flat_block'] = self.physics.named.data.xpos['flat_block']
    self.drawer_opened = False
    return self._get_obs()

  def _did_not_move(self, block_name):
    """True if ``block_name`` is within 1 cm of its reset position."""
    current_pos = self.physics.named.data.xpos[block_name]
    dist = np.linalg.norm(current_pos - self.original_pos[block_name])
    return dist < 0.01

  def _total_movement(self, block_name, max_dist=5.0):
    """Distance travelled since reset, normalized by ``max_dist``."""
    current_pos = self.physics.named.data.xpos[block_name]
    dist = np.linalg.norm(current_pos - self.original_pos[block_name])
    return dist / max_dist

  def _get_dist_reward(self, object_pos, max_dist=1.0):
    """Reward in [0, 1] that grows as the end effector nears ``object_pos``."""
    eepos = self.physics.named.data.site_xpos['end_effector']
    dist = np.linalg.norm(eepos - object_pos)
    reward = 1 - (dist / max_dist)
    return max(0, min(1, reward))

  def _slide_reward(self, reward_type='dense_reward'):
    """Reward for opening the sliding door without disturbing the blocks."""
    blocks = ['flat_block', 'upright_block', 'ball']
    if reward_type == 'dense_reward':
      door_pos = self.physics.named.data.qpos['slide_joint'][0] / 0.6
      target_pos = (self.physics.named.data.site_xpos['slide_handle'] -
                    np.array([0.15, 0, 0]))
      dist_reward = self._get_dist_reward(target_pos)
      did_not_move_reward = (0.33 * self._did_not_move(blocks[0]) +
                             0.33 * self._did_not_move(blocks[1]) +
                             0.34 * self._did_not_move(blocks[2]))
      task_reward = (0.75 * door_pos) + (0.25 * dist_reward)
      return (0.9 * task_reward) + (0.1 * did_not_move_reward)
    elif reward_type == 'success':
      return 1 * (self.physics.named.data.qpos['slide_joint'] > 0.55)

  def _drawer_reward(self, reward_type='dense_reward'):
    """Reward for pulling the drawer open."""
    if reward_type == 'dense_reward':
      drawer_pos = abs(self.physics.named.data.qpos['drawer_joint'][0]) / 0.3
      dist_reward = self._get_dist_reward(
          self.physics.named.data.geom_xpos['drawer_handle'])
      return (0.75 * drawer_pos) + (0.25 * dist_reward)
    elif reward_type == 'success':
      return 1 * (self.physics.named.data.qpos['drawer_joint'] < -0.2)

  def _button_reward(self, color, reward_type='dense_reward'):
    """Reward for pressing the button of the given ``color``."""
    press_button = (
        self.physics.named.data.qpos[color + '_light'][0] < -0.00453)
    if reward_type == 'dense_reward':
      dist_reward = self._get_dist_reward(
          self.physics.named.data.xpos[color + '_button'])
      return (0.25 * press_button) + (0.75 * dist_reward)
    elif reward_type == 'success':
      return 1.0 * press_button

  def _stack_reward(self, reward_type='dense_reward'):
    """Reward for stacking the upright block on the flat block."""
    target_offset = [0, 0, 0.0377804]
    current_offset = (self.physics.named.data.xpos['upright_block'] -
                      self.physics.named.data.xpos['flat_block'])
    offset_difference = np.linalg.norm(target_offset - current_offset)
    dist_reward = self._get_dist_reward(
        self.physics.named.data.xpos['upright_block'])
    if reward_type == 'dense_reward':
      return -offset_difference + dist_reward
    elif reward_type == 'success':
      return offset_difference < 0.04

  def _push_off_table(self, block_name, reward_type='dense_reward'):
    """Reward for pushing ``block_name`` off the table, others staying put."""
    blocks = ['flat_block', 'upright_block', 'ball']
    blocks.remove(block_name)
    if reward_type == 'dense_reward':
      block_pushed = (1 - (self.physics.named.data.xpos[block_name][2] /
                           self.original_pos[block_name][2]))
      block_0_stay_put = (1 - self._total_movement(blocks[0]))
      block_1_stay_put = (1 - self._total_movement(blocks[1]))
      reward = ((0.8 * block_pushed) + (0.1 * block_0_stay_put) +
                (0.1 * block_1_stay_put))
      reward = max(0, min(1, reward))
      dist_reward = self._get_dist_reward(
          self.physics.named.data.xpos[block_name])
      return (0.75 * reward) + (0.25 * dist_reward)
    elif reward_type == 'success':
      return 1 * ((self.physics.named.data.qpos[block_name][2] < 0.6) and
                  self._did_not_move(blocks[0]) and
                  self._did_not_move(blocks[1]))

  def _put_in_bin(self, block_name, reward_type='dense_reward'):
    """Reward for placing ``block_name`` inside the bin volume."""
    pos = self.physics.named.data.xpos[block_name]
    success = (pos[0] > 0.28) and (pos[0] < 0.52) and (pos[1] > 0.38) and (
        pos[1] < 0.62) and (pos[2] > 0) and (pos[2] < 0.4)
    if reward_type == 'dense_reward':
      dist_reward = self._get_dist_reward(
          self.physics.named.data.xpos[block_name])
      return (0.5 * dist_reward) + (0.5 * float(success))
    elif reward_type == 'success':
      return 1 * success

  def _put_in_shelf(self, block_name, reward_type='dense_reward'):
    """Reward for placing ``block_name`` in the shelf, others staying put."""
    x_success = (self.physics.named.data.xpos[block_name][0] > 0.2)
    y_success = (self.physics.named.data.xpos[block_name][1] > 1.0)
    success = x_success and y_success
    blocks = ['flat_block', 'upright_block', 'ball']
    blocks.remove(block_name)
    if reward_type == 'dense_reward':
      target_x_y = np.array([0.4, 1.1])
      block_dist_reward = 1 - (np.linalg.norm(
          target_x_y - self.physics.named.data.xpos[block_name][0:2]))
      dist_reward = self._get_dist_reward(
          self.physics.named.data.xpos[block_name])
      block_0_stay_put = (1 - self._total_movement(blocks[0]))
      block_1_stay_put = (1 - self._total_movement(blocks[1]))
      block_in_shelf = ((0.33 * dist_reward) + (0.33 * block_dist_reward) +
                        (0.34 * float(success)))
      reward = ((0.5 * block_in_shelf) + (0.25 * block_0_stay_put) +
                (0.25 * block_1_stay_put))
      return reward
    elif reward_type == 'success':
      return 1 * success

  def _lift_block(self, block_name, reward_type='dense_reward'):
    """Reward for lifting ``block_name`` above a per-object height."""
    if reward_type == 'dense_reward':
      dist_reward = self._get_dist_reward(
          self.physics.named.data.xpos[block_name])
      block_reward = (self.physics.named.data.xpos[block_name][2] -
                      self.original_pos[block_name][2]) * 10
      block_reward = max(0, min(1, block_reward))
      return (0.85 * block_reward) + (0.15 * dist_reward)
    elif reward_type == 'success':
      success_criteria = {'upright_block': 0.86, 'ball': 0.81,
                          'flat_block': 0.78}
      threshold = success_criteria[block_name]
      return 1 * (self.physics.named.data.xpos[block_name][2] > threshold)

  def _get_task_reward(self, task, reward_type):
    """Evaluates the task's reward function and clamps the result to [0, 1]."""
    reward = self.reward_functions[task](reward_type)
    reward = max(0, min(1, reward))
    return reward

  def _get_obs(self):
    """Builds the observation dict returned by ``reset`` and ``step``."""
    # NOTE(review): 'qpos_objects' is filled from data.qvel, matching the
    # declared (26,) space; confirm this is intentional (object qpos would
    # have a different length because free joints use quaternions).
    return {'image': self.render(resize=True),
            'qpos_robot': self.physics.data.qpos[:self.num_joints].copy(),
            'qvel_robot': self.physics.data.qvel[:self.num_joints].copy(),
            'end_effector': self.physics.named.data.site_xpos['end_effector'],
            'qpos_objects': self.physics.data.qvel[self.num_joints:].copy(),
            'qvel_objects': self.physics.data.qvel[self.num_joints:].copy()}
| [
"numpy.random.uniform",
"dm_control.mujoco.Physics.from_xml_path",
"os.path.dirname",
"numpy.zeros",
"numpy.ones",
"numpy.clip",
"numpy.asarray",
"numpy.random.random",
"numpy.array",
"numpy.linalg.norm",
"gym.spaces.Box",
"PIL.Image.fromarray",
"dm_control.mujoco.Camera",
"dm_control.util... | [((651, 691), 'dm_control.mujoco.Physics.from_xml_path', 'mujoco.Physics.from_xml_path', (['model_path'], {}), '(model_path)\n', (679, 691), False, 'from dm_control import mujoco\n'), ((4111, 4134), 'gym.spaces.Dict', 'gym.spaces.Dict', (['spaces'], {}), '(spaces)\n', (4126, 4134), False, 'import gym\n'), ((4332, 4431), 'dm_control.mujoco.Camera', 'mujoco.Camera', ([], {'physics': 'self.physics', 'height': "params['size']", 'width': "params['size']", 'camera_id': '(-1)'}), "(physics=self.physics, height=params['size'], width=params[\n 'size'], camera_id=-1)\n", (4345, 4431), False, 'from dm_control import mujoco\n'), ((5211, 5427), 'dm_control.utils.inverse_kinematics.qpos_from_site_pose', 'inverse_kinematics.qpos_from_site_pose', (['self.physics_copy', '"""end_effector"""', 'pos'], {'joint_names': "('panda0_joint1', 'panda0_joint2', 'panda0_joint3', 'panda0_joint4',\n 'panda0_joint5', 'panda0_joint6')", 'inplace': '(True)'}), "(self.physics_copy, 'end_effector',\n pos, joint_names=('panda0_joint1', 'panda0_joint2', 'panda0_joint3',\n 'panda0_joint4', 'panda0_joint5', 'panda0_joint6'), inplace=True)\n", (5249, 5427), False, 'from dm_control.utils import inverse_kinematics\n'), ((5875, 5896), 'numpy.array', 'np.array', (['full_action'], {}), '(full_action)\n', (5883, 5896), True, 'import numpy as np\n'), ((6328, 6395), 'numpy.clip', 'np.clip', (['joint[6]', 'self.joint_bounds[6][0]', 'self.joint_bounds[6][1]'], {}), '(joint[6], self.joint_bounds[6][0], self.joint_bounds[6][1])\n', (6335, 6395), True, 'import numpy as np\n'), ((7853, 7917), 'numpy.array', 'np.array', (['[-0.3, -0.4, 0.28, -2.5, 0.13, 1.87, 0.91, 0.01, 0.01]'], {}), '([-0.3, -0.4, 0.28, -2.5, 0.13, 1.87, 0.91, 0.01, 0.01])\n', (7861, 7917), True, 'import numpy as np\n'), ((9107, 9118), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (9115, 9118), True, 'import numpy as np\n'), ((9705, 9764), 'numpy.linalg.norm', 'np.linalg.norm', (['(current_pos - 
self.original_pos[block_name])'], {}), '(current_pos - self.original_pos[block_name])\n', (9719, 9764), True, 'import numpy as np\n'), ((9914, 9973), 'numpy.linalg.norm', 'np.linalg.norm', (['(current_pos - self.original_pos[block_name])'], {}), '(current_pos - self.original_pos[block_name])\n', (9928, 9973), True, 'import numpy as np\n'), ((10131, 10165), 'numpy.linalg.norm', 'np.linalg.norm', (['(eepos - object_pos)'], {}), '(eepos - object_pos)\n', (10145, 10165), True, 'import numpy as np\n'), ((12169, 12215), 'numpy.linalg.norm', 'np.linalg.norm', (['(target_offset - current_offset)'], {}), '(target_offset - current_offset)\n', (12183, 12215), True, 'import numpy as np\n'), ((586, 611), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (601, 611), False, 'import os\n'), ((3487, 3511), 'numpy.ones', 'np.ones', (['self.action_dim'], {}), '(self.action_dim)\n', (3494, 3511), True, 'import numpy as np\n'), ((3589, 3660), 'gym.spaces.Box', 'gym.spaces.Box', (['(0)', '(255)', '(self.image_size, self.image_size, 3)', 'np.uint8'], {}), '(0, 255, (self.image_size, self.image_size, 3), np.uint8)\n', (3603, 3660), False, 'import gym\n'), ((3697, 3761), 'gym.spaces.Box', 'gym.spaces.Box', (['self.joint_bounds[:, 0]', 'self.joint_bounds[:, 1]'], {}), '(self.joint_bounds[:, 0], self.joint_bounds[:, 1])\n', (3711, 3761), False, 'import gym\n'), ((3822, 3871), 'gym.spaces.Box', 'gym.spaces.Box', (['(-np.inf)', 'np.inf', '(9,)', 'np.float32'], {}), '(-np.inf, np.inf, (9,), np.float32)\n', (3836, 3871), False, 'import gym\n'), ((3897, 3946), 'gym.spaces.Box', 'gym.spaces.Box', (['(-np.inf)', 'np.inf', '(3,)', 'np.float32'], {}), '(-np.inf, np.inf, (3,), np.float32)\n', (3911, 3946), False, 'import gym\n'), ((3972, 4022), 'gym.spaces.Box', 'gym.spaces.Box', (['(-np.inf)', 'np.inf', '(26,)', 'np.float32'], {}), '(-np.inf, np.inf, (26,), np.float32)\n', (3986, 4022), False, 'import gym\n'), ((4048, 4098), 'gym.spaces.Box', 'gym.spaces.Box', (['(-np.inf)', 
'np.inf', '(26,)', 'np.float32'], {}), '(-np.inf, np.inf, (26,), np.float32)\n', (4062, 4098), False, 'import gym\n'), ((5143, 5160), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (5153, 5160), True, 'import numpy as np\n'), ((7958, 8114), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.physics.model.actuator_ctrlrange[:self.num_joints, 0]', 'high': 'self.physics.model.actuator_ctrlrange[:self.num_joints, 1]'}), '(low=self.physics.model.actuator_ctrlrange[:self.\n num_joints, 0], high=self.physics.model.actuator_ctrlrange[:self.\n num_joints, 1])\n', (7975, 8114), True, 'import numpy as np\n'), ((8367, 8385), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8383, 8385), True, 'import numpy as np\n'), ((8444, 8462), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8460, 8462), True, 'import numpy as np\n'), ((8523, 8541), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8539, 8541), True, 'import numpy as np\n'), ((8602, 8620), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8618, 8620), True, 'import numpy as np\n'), ((8675, 8693), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8691, 8693), True, 'import numpy as np\n'), ((8748, 8766), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8764, 8766), True, 'import numpy as np\n'), ((8939, 8957), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8955, 8957), True, 'import numpy as np\n'), ((14311, 14331), 'numpy.array', 'np.array', (['[0.4, 1.1]'], {}), '([0.4, 1.1])\n', (14319, 14331), True, 'import numpy as np\n'), ((3461, 3485), 'numpy.ones', 'np.ones', (['self.action_dim'], {}), '(self.action_dim)\n', (3468, 3485), True, 'import numpy as np\n'), ((8839, 8857), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (8855, 8857), True, 'import numpy as np\n'), ((10544, 10566), 'numpy.array', 'np.array', (['[0.15, 0, 0]'], {}), '([0.15, 0, 0])\n', (10552, 10566), True, 'import numpy as 
np\n'), ((14363, 14437), 'numpy.linalg.norm', 'np.linalg.norm', (['(target_x_y - self.physics.named.data.xpos[block_name][0:2])'], {}), '(target_x_y - self.physics.named.data.xpos[block_name][0:2])\n', (14377, 14437), True, 'import numpy as np\n'), ((4961, 4983), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (4976, 4983), False, 'from PIL import Image\n')] |
import tensorflow as tf
import numpy as np
# Memoized primality results, pre-seeded with a few known values.
prime_states = {
    2: True,
    3: True,
    4: False
}


def is_prime(givenNumber):
    """Return True if ``givenNumber`` is prime, caching results.

    Uses trial division up to sqrt(n). Numbers below 2 are correctly
    reported as non-prime (the previous implementation returned True for
    0, 1 and negative inputs because the division loop never ran).
    """
    if givenNumber not in prime_states:
        if givenNumber < 2:
            # 0, 1 and negatives are not prime by definition.
            prime_states[givenNumber] = False
        else:
            prime_states[givenNumber] = True
            for num in range(2, int(givenNumber ** 0.5) + 1):
                if givenNumber % num == 0:
                    prime_states[givenNumber] = False
                    break
    return prime_states[givenNumber]
def get_next_prime(x):
    """Return the smallest prime greater than or equal to ``x``."""
    candidate = x
    while True:
        if is_prime(candidate):
            return candidate
        candidate += 1
if __name__ == "__main__":
    # Sanity check of the prime helper before training.
    a = get_next_prime(500)
    print(a)

    # Small fully-connected regression network: scalar input -> scalar
    # prediction of the next prime.
    prime_model = tf.keras.Sequential()
    prime_model.add(tf.keras.layers.Dense(24, activation='relu', input_dim=1))
    prime_model.add(tf.keras.layers.Dense(24, activation='relu'))
    prime_model.add(tf.keras.layers.Dense(24, activation='relu'))
    prime_model.add(tf.keras.layers.Dense(24, activation='relu'))
    prime_model.add(tf.keras.layers.Dense(24, activation='relu'))
    prime_model.add(tf.keras.layers.Dense(1))

    # Mean absolute error regression with default Adam settings.
    opt = tf.keras.optimizers.Adam()
    loss = tf.keras.losses.mae
    prime_model.compile(optimizer=opt, loss=loss)
    prime_model.summary()

    batch_size = 64
    steps = 500

    ##### Run the Training #####
    # NOTE(review): each step trains on range(i * batch_size), so the
    # "batch" grows linearly with i instead of being a fixed-size slice;
    # confirm whether x = range(i*batch_size, (i+1)*batch_size) was
    # intended.
    for i in range(steps):
        x = np.array(range(i*batch_size))
        y = np.array([get_next_prime(n) for n in x])
        r = prime_model.train_on_batch(x, y)
        print(r)  # train_on_batch returns the scalar loss for this batch
        # print(r.history['loss'])

    ##### Run the Evaluation #####
    # Evaluate on 50 inputs just past the training range.
    for i in range(steps*batch_size,steps*batch_size+50):
        x = np.array(i).reshape((1,1))
        expected = get_next_prime(i)
        pred = prime_model.predict(x)
        print("input: %i\texpected: %i\tpredicted %i"%(i, expected, pred[0]))
| [
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"tensorflow.keras.Sequential"
] | [((595, 616), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (614, 616), True, 'import tensorflow as tf\n'), ((1017, 1043), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (1041, 1043), True, 'import tensorflow as tf\n'), ((637, 694), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(24)'], {'activation': '"""relu"""', 'input_dim': '(1)'}), "(24, activation='relu', input_dim=1)\n", (658, 694), True, 'import tensorflow as tf\n'), ((716, 760), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", (737, 760), True, 'import tensorflow as tf\n'), ((782, 826), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", (803, 826), True, 'import tensorflow as tf\n'), ((848, 892), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", (869, 892), True, 'import tensorflow as tf\n'), ((914, 958), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", (935, 958), True, 'import tensorflow as tf\n'), ((980, 1004), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (1001, 1004), True, 'import tensorflow as tf\n'), ((1547, 1558), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (1555, 1558), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Sample points along sine and cosine curves on [0, 3*pi).
xs = np.arange(0, 3 * np.pi, 0.1)
sine_vals = np.sin(xs)
cosine_vals = np.cos(xs)

# Draw both curves on the same axes and display the figure.
plt.plot(xs, sine_vals)
plt.plot(xs, cosine_vals)
plt.show()  # Graphics only appear once plt.show() is called.
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.sin",
"numpy.arange",
"numpy.cos"
] | [((117, 145), 'numpy.arange', 'np.arange', (['(0)', '(3 * np.pi)', '(0.1)'], {}), '(0, 3 * np.pi, 0.1)\n', (126, 145), True, 'import numpy as np\n'), ((154, 163), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (160, 163), True, 'import numpy as np\n'), ((172, 181), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (178, 181), True, 'import numpy as np\n'), ((217, 235), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_sin'], {}), '(x, y_sin)\n', (225, 235), True, 'import matplotlib.pyplot as plt\n'), ((236, 254), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_cos'], {}), '(x, y_cos)\n', (244, 254), True, 'import matplotlib.pyplot as plt\n'), ((254, 264), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (262, 264), True, 'import matplotlib.pyplot as plt\n')] |
from math import ceil
import torch
from torch import nn
from torch.nn.modules import dropout
import numpy as np
class Model(nn.Module):
    """LSTM regression model mapping batches of delta_t values to q values.

    The stored series are truncated to ``bsize * bdiv`` elements so the
    data divides evenly into ``bdiv`` batches of ``bsize`` samples each.
    """

    # delta_t -- input vector, q -- output vector, bdiv -- batches division
    def __init__(self, delta_t_init, q_init, bsize, nlayers, device, dropout):
        """
        Args:
            delta_t_init: 1-D tensor of input samples.
            q_init: 1-D tensor of target samples.
            bsize: Batch size (also the LSTM input/hidden width).
            nlayers: Number of stacked LSTM layers.
            device: Device on which forward passes are executed.
            dropout: Dropout probability between LSTM layers.
        """
        super(Model, self).__init__()
        self.bsize = bsize
        self.bdiv = int(len(delta_t_init) / bsize)
        # Neglecting the last len(q) - bsize * bdiv elements so the data
        # splits into an integer number of batches.
        self.delta_t = delta_t_init[0:self.bsize * self.bdiv].clone().detach()
        self.q = q_init[0:self.bsize * self.bdiv].clone().detach()
        # (The redundant ``self.bdiv = self.bdiv`` self-assignment that was
        # here previously has been removed.)
        self.lstm = nn.LSTM(input_size=self.bsize,
                            hidden_size=self.bsize, num_layers=nlayers,
                            dropout=dropout)
        # Fixed random initial hidden/cell states (not learned parameters).
        self.h0 = torch.randn(nlayers, 1, self.bsize)
        self.c0 = torch.randn(nlayers, 1, self.bsize)
        self.linear = nn.Linear(in_features=self.bsize*nlayers,
                                out_features=self.bsize)
        self.device = device
        self.nlayers = nlayers

    def forward(self, tr):
        """Runs one batch through the LSTM.

        Args:
            tr: Tensor with ``bsize`` elements (one batch of inputs).

        Returns:
            Tensor of shape ``(bsize,)`` produced by projecting the final
            hidden states of all LSTM layers through the linear head.
        """
        tr = tr.view(1, 1, self.bsize)
        device = self.device
        tr = tr.to(device).float()
        h0 = self.h0.to(device).float()
        c0 = self.c0.to(device).float()
        lstm = self.lstm.to(device)
        linear = self.linear.to(device).float()
        x, (h_out, _) = lstm(tr, (h0, c0))
        # Concatenate the last hidden state of every layer, then project.
        h_out = h_out.flatten()
        x = linear(h_out)
        return x
class Dset:
    """Wraps a two-column time series and provides window-averaging and
    train/validation splitting utilities."""

    def __init__(self, ts, train_validation, avg_const=1):
        """
        Args:
            ts: DataFrame-like object with two columns (e.g. delta_t and q).
            train_validation: Fraction of samples assigned to training.
            avg_const: Window size used by ``ts_average``.
        """
        self.head = ts.columns.values
        self.ts = ts
        self.avg_const = avg_const
        self.train_validation = train_validation

    # averaging the timeseries
    def ts_average(self):
        """Block-averages both columns over windows of ``avg_const`` samples.

        Trailing samples that do not fill a complete window are dropped.

        Returns:
            Tuple ``(v1_avg, v2_avg)`` of numpy arrays of window means.
        """
        c = self.avg_const
        v1 = np.asarray(self.ts[self.head[0]], dtype=float)  # e.g. delta_t
        v2 = np.asarray(self.ts[self.head[1]], dtype=float)  # e.g. q
        itr = len(v1) // c
        # Vectorized window mean: reshape to (windows, c) and average rows.
        # Equivalent to the previous per-window np.sum(...) / c loop but
        # O(n) instead of O(n^2) (np.append reallocated on every step).
        v1_avg = v1[:itr * c].reshape(itr, c).mean(axis=1)
        v2_avg = v2[:itr * c].reshape(itr, c).mean(axis=1)
        return v1_avg, v2_avg

    # train - validation split
    def tv_split(self, v1, v2):
        """Splits paired vectors into train and validation portions.

        Args:
            v1, v2: Equal-length indexable sequences.

        Returns:
            ``[v1_train, v2_train, train_indices,
               v1_validation, v2_validation, validation_indices]``
        """
        limit = int(len(v1) * self.train_validation)
        first = list(range(limit))           # indices of the training part
        last = list(range(limit, len(v1)))   # indices of the validation part
        return [v1[:limit], v2[:limit], first, v1[limit:], v2[limit:], last]
| [
"numpy.sum",
"torch.randn",
"numpy.array",
"torch.nn.Linear",
"torch.nn.LSTM"
] | [((662, 757), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'self.bsize', 'hidden_size': 'self.bsize', 'num_layers': 'nlayers', 'dropout': 'dropout'}), '(input_size=self.bsize, hidden_size=self.bsize, num_layers=nlayers,\n dropout=dropout)\n', (669, 757), False, 'from torch import nn\n'), ((828, 863), 'torch.randn', 'torch.randn', (['nlayers', '(1)', 'self.bsize'], {}), '(nlayers, 1, self.bsize)\n', (839, 863), False, 'import torch\n'), ((882, 917), 'torch.randn', 'torch.randn', (['nlayers', '(1)', 'self.bsize'], {}), '(nlayers, 1, self.bsize)\n', (893, 917), False, 'import torch\n'), ((940, 1008), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(self.bsize * nlayers)', 'out_features': 'self.bsize'}), '(in_features=self.bsize * nlayers, out_features=self.bsize)\n', (949, 1008), False, 'from torch import nn\n'), ((2005, 2017), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2013, 2017), True, 'import numpy as np\n'), ((2035, 2047), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2043, 2047), True, 'import numpy as np\n'), ((2199, 2228), 'numpy.sum', 'np.sum', (['v1[j * c:(j + 1) * c]'], {}), '(v1[j * c:(j + 1) * c])\n', (2205, 2228), True, 'import numpy as np\n'), ((2265, 2294), 'numpy.sum', 'np.sum', (['v2[j * c:(j + 1) * c]'], {}), '(v2[j * c:(j + 1) * c])\n', (2271, 2294), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.