code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Original Source: https://github.com/fizyr/keras-retinanet
"""
import numpy as np
from .anchor_parameters import AnchorParameters
from .anchor_calc import compute_overlap
#from keras.utils.generic_utils import to_list
from tensorflow.python.keras.utils.generic_utils import to_list
def layer_shapes(image_shape, model):
    """Compute layer shapes given input image shape and the model.
    Args
        image_shape: The shape of the image.
        model: The model to use for computing how the image shape is transformed in the pyramid.
    Returns
        A dictionary mapping layer names to image shapes.
    """
    shape = {}
    input_shapes = to_list(model.input_shape)
    # seed the shape table with the model's declared input shapes
    for i, input_name in enumerate(model.input_names):
        # shape[input_name] = (None,) + image_shape
        shape[input_name] = input_shapes[i]
        if None in input_shapes[i][1:]:
            # variable spatial dims: substitute the caller-provided image_shape,
            # which is only unambiguous for a single-input model
            if i > 0:
                raise Exception("Variable image size unsupported when multiple \
                inputs are active.")
            else:
                shape[input_name] = (None,) + image_shape
    # propagate shapes layer by layer; relies on the private Keras
    # attribute `_inbound_nodes`, so this is version-sensitive
    for layer in model.layers[1:]:
        nodes = layer._inbound_nodes
        for node in nodes:
            inputs = [shape[lr.name] for lr in node.inbound_layers]
            if not inputs:
                continue
            # compute_output_shape expects a single shape or a list of shapes
            shape[layer.name] = layer.compute_output_shape(inputs[0] if len(inputs) == 1 else inputs)
    return shape
def make_shapes_callback(model):
    """Build a callback that reports pyramid-level shapes for a given model.

    The returned function maps (image_shape, pyramid_levels) to the list of
    spatial shapes of the model's P{level} feature maps.
    """
    def get_shapes(image_shape, pyramid_levels):
        all_shapes = layer_shapes(image_shape, model)
        return [all_shapes["P{}".format(level)][1:3] for level in pyramid_levels]
    return get_shapes
def guess_shapes(image_shape, pyramid_levels):
    """Estimate feature-map shapes for each pyramid level.

    Args
        image_shape: The shape of the image (only the first two dims are used).
        pyramid_levels: A list of what pyramid levels are used.
    Returns
        A list of np.array shapes, one per pyramid level (ceil division by 2**level).
    """
    base = np.array(image_shape[:2])
    # (base + 2**lvl - 1) // 2**lvl is integer ceil(base / 2**lvl)
    return [(base + 2 ** level - 1) // (2 ** level) for level in pyramid_levels]
def anchors_for_shape(
    image_shape,
    pyramid_levels=None,
    anchor_params=None,
    shapes_callback=None,
):
    """Generate anchors for a given image shape.

    Args
        image_shape: The shape of the image.
        pyramid_levels: List of ints representing which pyramids to use (defaults to [3, 4, 5, 6, 7]).
        anchor_params: Struct containing anchor parameters. If None, default values are used.
        shapes_callback: Function to call for getting the shape of the image at different pyramid levels.
    Returns
        np.array of shape (N, 4) containing the (x1, y1, x2, y2) coordinates for the anchors.
    """
    # fall back to library defaults for any unspecified setting
    if pyramid_levels is None:
        pyramid_levels = [3, 4, 5, 6, 7]
    if anchor_params is None:
        anchor_params = AnchorParameters.default
    if shapes_callback is None:
        shapes_callback = guess_shapes
    image_shapes = shapes_callback(image_shape, pyramid_levels)
    # collect shifted anchors per level, then stack them in one pass;
    # the empty (0, 4) seed keeps the result well-formed for zero levels
    per_level = [np.zeros((0, 4))]
    for idx in range(len(pyramid_levels)):
        reference_anchors = generate_anchors(
            base_size=anchor_params.sizes[idx],
            ratios=anchor_params.ratios,
            scales=anchor_params.scales,
        )
        per_level.append(
            shift(image_shapes[idx], anchor_params.strides[idx], reference_anchors)
        )
    return np.concatenate(per_level, axis=0)
def shift(shape, stride, anchors):
    """Replicate `anchors` at every cell of a feature map.

    Args
        shape  : (rows, cols) of the feature map to tile the anchors over.
        stride : pixel stride between neighbouring cells.
        anchors: (A, 4) array of reference anchors applied at each location.
    Returns
        (rows * cols * A, 4) array of shifted anchors.
    """
    # cell centres, offset half a stride from the top-left corner
    centers_x = (np.arange(0, shape[1]) + 0.5) * stride
    centers_y = (np.arange(0, shape[0]) + 0.5) * stride
    grid_x, grid_y = np.meshgrid(centers_x, centers_y)
    # each row is (x, y, x, y) so it can be added to an (x1, y1, x2, y2) box
    shifts = np.vstack((
        grid_x.ravel(), grid_y.ravel(),
        grid_x.ravel(), grid_y.ravel()
    )).transpose()
    num_anchors = anchors.shape[0]
    num_cells = shifts.shape[0]
    # broadcast (1, A, 4) anchors against (K, 1, 4) shifts -> (K, A, 4)
    shifted = anchors.reshape((1, num_anchors, 4)) + shifts.reshape(
        (1, num_cells, 4)
    ).transpose((1, 0, 2))
    return shifted.reshape((num_cells * num_anchors, 4))
def generate_anchors(base_size=16, ratios=None, scales=None):
    """Enumerate reference anchor windows for every (ratio, scale) pair.

    Anchors are centred on the origin and returned as (x1, y1, x2, y2)
    rows in an array of shape (len(ratios) * len(scales), 4).
    """
    if ratios is None:
        ratios = AnchorParameters.default.ratios
    if scales is None:
        scales = AnchorParameters.default.scales
    count = len(ratios) * len(scales)
    ratios_rep = np.repeat(ratios, len(scales))
    anchors = np.zeros((count, 4))
    # start with square boxes of side base_size * scale
    anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T
    # keep each box's area fixed while reshaping it to the requested ratio
    areas = anchors[:, 2] * anchors[:, 3]
    anchors[:, 2] = np.sqrt(areas / ratios_rep)
    anchors[:, 3] = anchors[:, 2] * ratios_rep
    # recentre: convert (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)
    anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
    anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
    return anchors
def bbox_transform(anchors, gt_boxes, mean=None, std=None):
    """Compute normalized bounding-box regression targets for an image.

    Each target is the per-coordinate offset of the ground-truth box from
    its anchor, scaled by anchor width/height, then standardized by
    (mean, std). Defaults: mean = 0, std = 0.2 per coordinate.
    """
    mean = np.array([0, 0, 0, 0]) if mean is None else mean
    std = np.array([0.2, 0.2, 0.2, 0.2]) if std is None else std
    if isinstance(mean, (list, tuple)):
        mean = np.array(mean)
    elif not isinstance(mean, np.ndarray):
        raise ValueError('Expected mean to be a np.ndarray, list or tuple. Received: {}'.format(type(mean)))
    if isinstance(std, (list, tuple)):
        std = np.array(std)
    elif not isinstance(std, np.ndarray):
        raise ValueError('Expected std to be a np.ndarray, list or tuple. Received: {}'.format(type(std)))
    widths = anchors[:, 2] - anchors[:, 0]
    heights = anchors[:, 3] - anchors[:, 1]
    # one (N, 4) array of (dx1, dy1, dx2, dy2) offsets
    deltas = np.stack(
        (
            (gt_boxes[:, 0] - anchors[:, 0]) / widths,
            (gt_boxes[:, 1] - anchors[:, 1]) / heights,
            (gt_boxes[:, 2] - anchors[:, 2]) / widths,
            (gt_boxes[:, 3] - anchors[:, 3]) / heights,
        ),
        axis=1,
    )
    return (deltas - mean) / std
def compute_gt_annotations(
    anchors,
    annotations,
    negative_overlap=0.4,
    positive_overlap=0.5
):
    """ Obtain indices of gt annotations with the greatest overlap.
    Args
        anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
        annotations: np.array of shape (N, 5) for (x1, y1, x2, y2, label).
        negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap or positive anchors (all anchors with overlap > positive_overlap are positive).
    Returns
        positive_indices: indices of positive anchors
        ignore_indices: indices of ignored anchors
        argmax_overlaps_inds: ordered overlaps indices
    """
    # pairwise overlaps between every anchor and every annotation; both are
    # cast to float64 for compute_overlap (presumably IoU — verify in anchor_calc)
    overlaps = compute_overlap(anchors.astype(np.float64), annotations.astype(np.float64))
    # for each anchor: index of, and overlap with, its best-matching annotation
    argmax_overlaps_inds = np.argmax(overlaps, axis=1)
    max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]
    # assign "dont care" labels: anchors strictly between the two thresholds
    # are neither positive nor negative and should be ignored in the loss
    positive_indices = max_overlaps >= positive_overlap
    ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices
    return positive_indices, ignore_indices, argmax_overlaps_inds
| [
"numpy.stack",
"numpy.meshgrid",
"numpy.argmax",
"numpy.zeros",
"tensorflow.python.keras.utils.generic_utils.to_list",
"numpy.append",
"numpy.array",
"numpy.tile",
"numpy.arange"
] | [((660, 686), 'tensorflow.python.keras.utils.generic_utils.to_list', 'to_list', (['model.input_shape'], {}), '(model.input_shape)\n', (667, 686), False, 'from tensorflow.python.keras.utils.generic_utils import to_list\n'), ((2101, 2126), 'numpy.array', 'np.array', (['image_shape[:2]'], {}), '(image_shape[:2])\n', (2109, 2126), True, 'import numpy as np\n'), ((3228, 3244), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (3236, 3244), True, 'import numpy as np\n'), ((4158, 4187), 'numpy.meshgrid', 'np.meshgrid', (['shift_x', 'shift_y'], {}), '(shift_x, shift_y)\n', (4169, 4187), True, 'import numpy as np\n'), ((5105, 5131), 'numpy.zeros', 'np.zeros', (['(num_anchors, 4)'], {}), '((num_anchors, 4))\n', (5113, 5131), True, 'import numpy as np\n'), ((6748, 6810), 'numpy.stack', 'np.stack', (['(targets_dx1, targets_dy1, targets_dx2, targets_dy2)'], {}), '((targets_dx1, targets_dy1, targets_dx2, targets_dy2))\n', (6756, 6810), True, 'import numpy as np\n'), ((7777, 7804), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (7786, 7804), True, 'import numpy as np\n'), ((3579, 3626), 'numpy.append', 'np.append', (['all_anchors', 'shifted_anchors'], {'axis': '(0)'}), '(all_anchors, shifted_anchors, axis=0)\n', (3588, 3626), True, 'import numpy as np\n'), ((5546, 5582), 'numpy.tile', 'np.tile', (['(anchors[:, 2] * 0.5)', '(2, 1)'], {}), '(anchors[:, 2] * 0.5, (2, 1))\n', (5553, 5582), True, 'import numpy as np\n'), ((5609, 5645), 'numpy.tile', 'np.tile', (['(anchors[:, 3] * 0.5)', '(2, 1)'], {}), '(anchors[:, 3] * 0.5, (2, 1))\n', (5616, 5645), True, 'import numpy as np\n'), ((5831, 5853), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (5839, 5853), True, 'import numpy as np\n'), ((5888, 5918), 'numpy.array', 'np.array', (['[0.2, 0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2, 0.2])\n', (5896, 5918), True, 'import numpy as np\n'), ((5975, 5989), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (5983, 
5989), True, 'import numpy as np\n'), ((6196, 6209), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (6204, 6209), True, 'import numpy as np\n'), ((4041, 4063), 'numpy.arange', 'np.arange', (['(0)', 'shape[1]'], {}), '(0, shape[1])\n', (4050, 4063), True, 'import numpy as np\n'), ((4095, 4117), 'numpy.arange', 'np.arange', (['(0)', 'shape[0]'], {}), '(0, shape[0])\n', (4104, 4117), True, 'import numpy as np\n'), ((7833, 7861), 'numpy.arange', 'np.arange', (['overlaps.shape[0]'], {}), '(overlaps.shape[0])\n', (7842, 7861), True, 'import numpy as np\n')] |
'''
Theano utility functions
'''
import sys
import json
import cPickle as pkl
import numpy
from collections import OrderedDict
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
floatX = theano.config.floatX
# numpy scalar type matching Theano's configured float width
# NOTE(review): numpy.typeDict is deprecated in modern NumPy — confirm the
# pinned NumPy version still exposes it
numpy_floatX = numpy.typeDict[floatX]
# float16 warning
if floatX == 'float16':
    bad = True
    try:
        [major_v, minor_v, sub_v] = map(int, theano.version.short_version.split('.'))
        # When a version of Theano that supports float16 without bugs is released, add a check here
    except:
        pass
    # NOTE(review): `bad` is never cleared above, so the warning always fires
    # for float16 — intentional until a fixed Theano release exists
    if bad:
        print >> sys.stderr, "Warning: float16 may not be fully supported by the current version of Theano"
# push parameters to Theano shared variables
def zip_to_theano(params, tparams):
    """Copy each array in `params` into the matching Theano shared variable in `tparams` (in place)."""
    for kk, vv in params.iteritems():
        tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip_from_theano(zipped, excluding_prefix=None):
    """Snapshot Theano shared variables into a plain OrderedDict of numpy values.

    Entries whose key starts with `excluding_prefix` (when given) are skipped.
    """
    new_params = OrderedDict()
    for kk, vv in zipped.iteritems():
        if excluding_prefix and (kk.startswith(excluding_prefix)):
            continue
        new_params[kk] = vv.get_value()
    return new_params
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
    """Return the values of `tparams` in insertion order (tparams must be an OrderedDict)."""
    return [vv for kk, vv in tparams.iteritems()]
# make prefix-appended name
def pp(pp, name):
    """Return `name` prefixed with `pp`, joined by an underscore."""
    return '{0}_{1}'.format(pp, name)
# initialize Theano shared variables according to the initial parameters
def init_theano_params(params):
    """Wrap each numpy array in `params` in a Theano shared variable named after its key."""
    tparams = OrderedDict()
    for kk, pp in params.iteritems():
        # note: indexes params[kk] rather than using the loop value `pp`,
        # which also shadows the module-level pp() helper
        tparams[kk] = theano.shared(params[kk], name=kk)
    return tparams
# load parameters
def load_params(path, params, with_prefix=''):
    """Update `params` in place with arrays loaded from a numpy archive at `path`.

    Falls back to `path + '.npz'` when `path` alone cannot be opened. Keys
    missing from the archive are warned about and skipped; the bookkeeping
    key 'zipped_params' is ignored. Loaded arrays are cast to floatX and
    stored under `with_prefix + key`. Returns the updated `params` dict.
    """
    try:
        archive = numpy.load(path)
    except IOError:
        # allow callers to omit the '.npz' extension
        archive = numpy.load(path + '.npz')
    new_params = OrderedDict()
    for kk, vv in params.iteritems():
        if kk not in archive:
            # BUG FIX: the original called logging.warn(), but the logging
            # module is never imported in this file, so a missing key raised
            # NameError instead of warning. Warn on stderr like the rest of
            # this module does.
            sys.stderr.write('Warning: %s is not in the archive\n' % kk)
            continue
        if kk == "zipped_params":
            continue
        new_params[with_prefix + kk] = archive[kk].astype(floatX, copy=False)
    params.update(new_params)
    return params
# load parameters of the optimizer
def load_optimizer_params(path, optimizer_name):
    """Load optimizer state arrays whose keys start with `optimizer_name`.

    Falls back to `path + '.npz'` when `path` alone cannot be opened; each
    matching array is cast to floatX. Returns a plain dict.
    """
    try:
        archive = numpy.load(path)
    except IOError:
        archive = numpy.load(path + '.npz')
    return dict(
        (kk, archive[kk].astype(floatX, copy=False))
        for kk in archive
        if kk.startswith(optimizer_name)
    )
# save model parameters, optimizer parameters and progress
def save(model_params, optimizer_params, training_progress, base_filename, file_float_type='float32'):
    """Persist model parameters, optimizer state and training progress.

    Model params go to `base_filename` via numpy.savez, optimizer params to
    `base_filename + '.gradinfo'`, and progress to
    `base_filename + '.progress.json'`. When `file_float_type` differs from
    the runtime floatX, arrays are cast before writing.
    """
    if file_float_type != floatX:
        new_model_params, new_optimizer_params = {}, {}
        for kk, vv in model_params.iteritems():
            new_model_params[kk] = vv.astype(file_float_type)
        for kk, vv in optimizer_params.iteritems():
            new_optimizer_params[kk] = vv.astype(file_float_type)
        model_params, optimizer_params = new_model_params, new_optimizer_params
    numpy.savez(base_filename, **model_params)
    numpy.savez(base_filename + '.gradinfo', **optimizer_params)
    training_progress.save_to_json(base_filename + '.progress.json')
def tanh(x):
    """Symbolic elementwise hyperbolic-tangent activation."""
    return tensor.tanh(x)
def linear(x):
    """Identity activation: returns its input unchanged."""
    return x
def concatenate(tensor_list, axis=0):
    """
    Alternative implementation of `theano.tensor.concatenate`.
    This function does exactly the same thing, but contrary to Theano's own
    implementation, the gradient is implemented on the GPU.
    Backpropagating through `theano.tensor.concatenate` yields slowdowns
    because the inverse operation (splitting) needs to be done on the CPU.
    This implementation does not have that problem.
    :usage:
        >>> x, y = theano.tensor.matrices('x', 'y')
        >>> c = concatenate([x, y], axis=1)
    :parameters:
        - tensor_list : list
            list of Theano tensor expressions that should be concatenated.
        - axis : int
            the tensors will be joined along this axis.
    :returns:
        - out : tensor
            the concatenated tensor expression.
    """
    # output shape: same as the first input except the concat axis, which is
    # the sum of all inputs' sizes along that axis
    concat_size = sum(tt.shape[axis] for tt in tensor_list)
    output_shape = ()
    for k in range(axis):
        output_shape += (tensor_list[0].shape[k],)
    output_shape += (concat_size,)
    for k in range(axis + 1, tensor_list[0].ndim):
        output_shape += (tensor_list[0].shape[k],)
    # write each input into its slice of a zero tensor via set_subtensor,
    # so the gradient is a plain slice (GPU-friendly) rather than a split
    out = tensor.zeros(output_shape)
    offset = 0
    for tt in tensor_list:
        indices = ()
        for k in range(axis):
            indices += (slice(None),)
        indices += (slice(offset, offset + tt.shape[axis]),)
        for k in range(axis + 1, tensor_list[0].ndim):
            indices += (slice(None),)
        out = tensor.set_subtensor(out[indices], tt)
        offset += tt.shape[axis]
    return out
# return name of word embedding for factor i
# special handling of factor 0 for backward compatibility
def embedding_name(i):
    """Name of the word-embedding matrix for factor `i`.

    Factor 0 keeps the legacy unsuffixed name 'Wemb' for backward
    compatibility; other factors get a numeric suffix.
    """
    return 'Wemb' if i == 0 else 'Wemb' + str(i)
# Zero out all parameters
def zero_all(params):
    """Overwrite every array in `params` with zeros, in place (shapes/dtypes preserved)."""
    for kk, vv in params.iteritems():
        vv[:] = numpy.zeros_like(vv)
def get_slice(array, n, dim):
    """Return the n-th chunk of width `dim` along the last axis of a 2-D or 3-D array."""
    start, stop = n * dim, (n + 1) * dim
    if array.ndim == 3:
        return array[:, :, start:stop]
    return array[:, start:stop]
| [
"theano.tensor.tanh",
"numpy.load",
"numpy.zeros_like",
"theano.version.short_version.split",
"theano.tensor.set_subtensor",
"theano.tensor.zeros",
"theano.shared",
"collections.OrderedDict",
"numpy.savez"
] | [((977, 990), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (988, 990), False, 'from collections import OrderedDict\n'), ((1521, 1534), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1532, 1534), False, 'from collections import OrderedDict\n'), ((1831, 1844), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1842, 1844), False, 'from collections import OrderedDict\n'), ((3064, 3106), 'numpy.savez', 'numpy.savez', (['base_filename'], {}), '(base_filename, **model_params)\n', (3075, 3106), False, 'import numpy\n'), ((3111, 3171), 'numpy.savez', 'numpy.savez', (["(base_filename + '.gradinfo')"], {}), "(base_filename + '.gradinfo', **optimizer_params)\n", (3122, 3171), False, 'import numpy\n'), ((3266, 3280), 'theano.tensor.tanh', 'tensor.tanh', (['x'], {}), '(x)\n', (3277, 3280), True, 'import theano.tensor as tensor\n'), ((4465, 4491), 'theano.tensor.zeros', 'tensor.zeros', (['output_shape'], {}), '(output_shape)\n', (4477, 4491), True, 'import theano.tensor as tensor\n'), ((1595, 1629), 'theano.shared', 'theano.shared', (['params[kk]'], {'name': 'kk'}), '(params[kk], name=kk)\n', (1608, 1629), False, 'import theano\n'), ((1738, 1754), 'numpy.load', 'numpy.load', (['path'], {}), '(path)\n', (1748, 1754), False, 'import numpy\n'), ((2285, 2301), 'numpy.load', 'numpy.load', (['path'], {}), '(path)\n', (2295, 2301), False, 'import numpy\n'), ((4792, 4830), 'theano.tensor.set_subtensor', 'tensor.set_subtensor', (['out[indices]', 'tt'], {}), '(out[indices], tt)\n', (4812, 4830), True, 'import theano.tensor as tensor\n'), ((5186, 5206), 'numpy.zeros_like', 'numpy.zeros_like', (['vv'], {}), '(vv)\n', (5202, 5206), False, 'import numpy\n'), ((422, 461), 'theano.version.short_version.split', 'theano.version.short_version.split', (['"""."""'], {}), "('.')\n", (456, 461), False, 'import theano\n'), ((1788, 1813), 'numpy.load', 'numpy.load', (["(path + '.npz')"], {}), "(path + '.npz')\n", (1798, 1813), False, 'import numpy\n'), ((2335, 
2360), 'numpy.load', 'numpy.load', (["(path + '.npz')"], {}), "(path + '.npz')\n", (2345, 2360), False, 'import numpy\n')] |
"""
===============================================
vidgear library source-code is deployed under the Apache 2.0 License:
Copyright (c) 2019-2020 <NAME>(@abhiTronix) <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================
"""
# import the necessary packages
import cv2
import time
import queue
import numpy as np
import logging as log
from mss import mss
import pyscreenshot as pysct
from threading import Thread, Event
from collections import deque, OrderedDict
from pkg_resources import parse_version
from mss.exception import ScreenShotError
from pyscreenshot.err import FailedBackendError
from .helper import capPropId, logger_handler
# define module-level logger; propagation is disabled and vidgear's own
# handler attached so records are not duplicated by the root logger
logger = log.getLogger("ScreenGear")
logger.propagate = False
logger.addHandler(logger_handler())
logger.setLevel(log.DEBUG)
class ScreenGear:
    """
    ScreenGear is designed exclusively for ultra-fast Screencasting, which means it can grab frames from your monitor in real-time, either by defining an area on the computer screen or full-screen,
    at the expense of inconsiderable latency. ScreenGear also seamlessly support frame capturing from multiple monitors as well as supports multiple backends.
    ScreenGear API implements a multi-threaded wrapper around pyscreenshot & python-mss python library, and also flexibly supports its internal parameter.
    """
    def __init__(
        self, monitor=None, backend="", colorspace=None, logging=False, **options
    ):
        """
        This constructor method initializes the object state and attributes of the ScreenGear class.
        Parameters:
            monitor (int): enables `mss` backend and sets the index of the monitor screen.
            backend (str): enables `pyscreenshot` and select suitable backend for extracting frames.
            colorspace (str): selects the colorspace of the input stream.
            logging (bool): enables/disables logging.
            options (dict): provides the flexibility to manually set the dimensions of capture screen area.
        """
        # enable logging if specified:
        self.__logging = logging if isinstance(logging, bool) else False
        # create monitor instance for the user-defined monitor
        self.__monitor_instance = None
        self.__backend = ""
        # monitor=None selects the pyscreenshot path; a monitor index selects mss
        if monitor is None:
            self.__capture_object = pysct
            self.__backend = backend.lower().strip()
        else:
            self.__capture_object = mss()
            if backend.strip():
                logger.warning(
                    "Backends are disabled for Monitor Indexing(monitor>=0)!"
                )
            try:
                self.__monitor_instance = self.__capture_object.monitors[monitor]
            except Exception as e:
                logger.exception(str(e))
                self.__monitor_instance = None
        # extract the special THREAD_TIMEOUT parameter from options and
        # validate it; it bounds how long read() blocks on the frame queue
        self.__thread_timeout = options.pop("THREAD_TIMEOUT", None)
        if self.__thread_timeout and isinstance(self.__thread_timeout, (int, float)):
            # set values
            self.__thread_timeout = int(self.__thread_timeout)
        else:
            # invalid or absent value: disable the timeout so queue reads block indefinitely
            self.__thread_timeout = None
        # create a bounded frame queue shared between the capture thread and read()
        self.__queue = queue.Queue(maxsize=96)  # maxsize 96 caps buffered frames
        # log it
        if logging:
            logger.debug("Enabling Threaded Queue Mode by default for ScreenGear!")
            if self.__thread_timeout:
                logger.debug(
                    "Setting Video-Thread Timeout to {}s.".format(self.__thread_timeout)
                )
        # initiate screen dimension handler
        screen_dims = {}
        # reformat proper mss dict and assign to screen dimension handler
        screen_dims = {
            k.strip(): v
            for k, v in options.items()
            if k.strip() in ["top", "left", "width", "height"]
        }
        # check whether user-defined dimensions are provided (all four keys required)
        if screen_dims and len(screen_dims) == 4:
            key_order = ("top", "left", "width", "height")
            screen_dims = OrderedDict((k, screen_dims[k]) for k in key_order)
            if logging:
                logger.debug("Setting Capture-Area dimensions: {}!".format(screen_dims))
        else:
            screen_dims.clear()
        # separately handle colorspace value to int conversion
        if colorspace:
            self.color_space = capPropId(colorspace.strip())
            if logging and not (self.color_space is None):
                logger.debug(
                    "Enabling `{}` colorspace for this video stream!".format(
                        colorspace.strip()
                    )
                )
        else:
            self.color_space = None
        # initialize mss capture instance
        self.__mss_capture_instance = ""
        try:
            if self.__monitor_instance is None:
                # pyscreenshot path: capture area is a (top, left, width, height) tuple
                if screen_dims:
                    self.__mss_capture_instance = tuple(screen_dims.values())
                # extract global frame from instance
                self.frame = np.asanyarray(
                    self.__capture_object.grab(
                        bbox=self.__mss_capture_instance,
                        childprocess=False,
                        backend=self.__backend,
                    )
                )
            else:
                # mss path: capture area is a dict offset into the chosen monitor
                if screen_dims:
                    self.__mss_capture_instance = {
                        "top": self.__monitor_instance["top"] + screen_dims["top"],
                        "left": self.__monitor_instance["left"] + screen_dims["left"],
                        "width": screen_dims["width"],
                        "height": screen_dims["height"],
                        "mon": monitor,
                    }
                else:
                    self.__mss_capture_instance = (
                        self.__monitor_instance  # otherwise create instance from monitor
                    )
                # extract global frame from instance
                self.frame = np.asanyarray(
                    self.__capture_object.grab(self.__mss_capture_instance)
                )
            # initialize and append to queue
            self.__queue.put(self.frame)
        except Exception as e:
            if isinstance(e, ScreenShotError):
                # otherwise catch and log errors
                if logging:
                    logger.exception(self.__capture_object.get_error_details())
                raise ValueError(
                    "[ScreenGear:ERROR] :: ScreenShotError caught, Wrong dimensions passed to python-mss, Kindly Refer Docs!"
                )
            elif isinstance(e, KeyError):
                raise ValueError(
                    "[ScreenGear:ERROR] :: ScreenShotError caught, Invalid backend: `{}`, Kindly Refer Docs!".format(
                        backend
                    )
                )
            else:
                raise SystemError(
                    "[ScreenGear:ERROR] :: Unable to grab any instance on this system, Are you running headless?"
                )
        # thread initialization
        self.__thread = None
        # initialize termination flag
        self.__terminate = Event()
    def start(self):
        """
        Launches the internal *Threaded Frames Extractor* daemon
        **Returns:** A reference to the ScreenGear class object.
        """
        self.__thread = Thread(target=self.__update, name="ScreenGear", args=())
        self.__thread.daemon = True
        self.__thread.start()
        return self
    def __update(self):
        """
        A **Threaded Frames Extractor**, that keep iterating frames from `mss` API to a internal monitored queue,
        until the thread is terminated, or frames runs out.
        """
        # initialize frame variable
        frame = None
        # keep looping infinitely until the thread is terminated
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.__terminate.is_set():
                break
            try:
                if self.__monitor_instance:
                    frame = np.asanyarray(
                        self.__capture_object.grab(self.__mss_capture_instance)
                    )
                else:
                    frame = np.asanyarray(
                        self.__capture_object.grab(
                            bbox=self.__mss_capture_instance,
                            childprocess=False,
                            backend=self.__backend,
                        )
                    )
                    # PIL-based backends deliver RGB; reverse the channel
                    # order to match OpenCV's BGR convention
                    if not self.__backend or self.__backend == "pil":
                        frame = frame[:, :, ::-1]
                assert not (
                    frame is None or np.shape(frame) == ()
                ), "[ScreenGear:ERROR] :: Failed to retreive any valid frames!"
            except Exception as e:
                if isinstance(e, ScreenShotError):
                    raise RuntimeError(self.__capture_object.get_error_details())
                else:
                    logger.exception(str(e))
                    self.__terminate.set()
                continue
            if not (self.color_space is None):
                # apply colorspace to frames
                color_frame = None
                try:
                    if isinstance(self.color_space, int):
                        color_frame = cv2.cvtColor(frame, self.color_space)
                    else:
                        if self.__logging:
                            logger.warning(
                                "Global color_space parameter value `{}` is not a valid!".format(
                                    self.color_space
                                )
                            )
                        self.color_space = None
                except Exception as e:
                    # Catch if any error occurred; fall back to the raw frame
                    self.color_space = None
                    if self.__logging:
                        logger.exception(str(e))
                        logger.warning("Input colorspace is not a valid colorspace!")
                if not (color_frame is None):
                    self.frame = color_frame
                else:
                    self.frame = frame
            else:
                self.frame = frame
            # append to queue (blocks when the queue is full, throttling capture)
            self.__queue.put(self.frame)
        # finally release mss resources
        if self.__monitor_instance:
            self.__capture_object.close()
    def read(self):
        """
        Extracts frames synchronously from the monitored queue, while maintaining a fixed-length frame buffer in the memory,
        and blocks the thread if the queue is full.
        **Returns:** A n-dimensional numpy array.
        """
        # check whether or not termination flag is enabled
        while not self.__terminate.is_set():
            return self.__queue.get(timeout=self.__thread_timeout)
        # otherwise return NoneType
        return None
    def stop(self):
        """
        Safely terminates the thread, and release the resources.
        """
        if self.__logging:
            logger.debug("Terminating ScreenGear Processes.")
        # indicate that the thread should be terminated
        self.__terminate.set()
        # wait until stream resources are released (producer thread might be still grabbing frame)
        if self.__thread is not None:
            if not (self.__queue is None):
                # drain the queue so the producer is not blocked on put()
                while not self.__queue.empty():
                    try:
                        self.__queue.get_nowait()
                    except queue.Empty:
                        continue
                    self.__queue.task_done()
            self.__thread.join()
| [
"threading.Thread",
"cv2.cvtColor",
"numpy.shape",
"mss.mss",
"threading.Event",
"collections.OrderedDict",
"queue.Queue",
"logging.getLogger"
] | [((1231, 1258), 'logging.getLogger', 'log.getLogger', (['"""ScreenGear"""'], {}), "('ScreenGear')\n", (1244, 1258), True, 'import logging as log\n'), ((3944, 3967), 'queue.Queue', 'queue.Queue', ([], {'maxsize': '(96)'}), '(maxsize=96)\n', (3955, 3967), False, 'import queue\n'), ((8039, 8046), 'threading.Event', 'Event', ([], {}), '()\n', (8044, 8046), False, 'from threading import Thread, Event\n'), ((8256, 8312), 'threading.Thread', 'Thread', ([], {'target': 'self.__update', 'name': '"""ScreenGear"""', 'args': '()'}), "(target=self.__update, name='ScreenGear', args=())\n", (8262, 8312), False, 'from threading import Thread, Event\n'), ((3031, 3036), 'mss.mss', 'mss', ([], {}), '()\n', (3034, 3036), False, 'from mss import mss\n'), ((4817, 4868), 'collections.OrderedDict', 'OrderedDict', (['((k, screen_dims[k]) for k in key_order)'], {}), '((k, screen_dims[k]) for k in key_order)\n', (4828, 4868), False, 'from collections import deque, OrderedDict\n'), ((10315, 10352), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'self.color_space'], {}), '(frame, self.color_space)\n', (10327, 10352), False, 'import cv2\n'), ((9654, 9669), 'numpy.shape', 'np.shape', (['frame'], {}), '(frame)\n', (9662, 9669), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 21:59:47 2019
@author: krups
"""
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras import layers,models
import numpy as np
import time
def split_test_train_function(texts_data, test_n, val_n):
    """Split a sequence into (test, validation, train) parts.

    The first `test_n` items form the test split, the next `val_n` the
    validation split, and everything after that the training split.
    """
    test_split = texts_data[:test_n]
    val_split = texts_data[test_n:test_n + val_n]
    train_split = texts_data[test_n + val_n:]
    return (test_split, val_split, train_split)
def preprocessing(texts_data, images_data, max_length, vocabulary_size):
    """Expand caption/image pairs into (partial caption, image, next word) triples.

    For every caption, each prefix of length >= 1 becomes one padded input
    sequence whose target is the one-hot encoding of the following token;
    the paired image features are repeated for every prefix.
    """
    assert len(texts_data) == len(images_data)
    in_texts, in_images, out_words = [], [], []
    for caption, image in zip(texts_data, images_data):
        for cut in range(1, len(caption)):
            padded = pad_sequences([caption[:cut]], maxlen=max_length).flatten()
            target = to_categorical(caption[cut], num_classes=vocabulary_size)
            in_texts.append(padded)
            in_images.append(image)
            out_words.append(target)
    return (np.array(in_texts), np.array(in_images), np.array(out_words))
def create_model(X_train_image,max_length,vocabulary_size):
    """Build and compile the merge-style image-captioning model.

    Image features and the partial caption are encoded separately (a Dense
    layer vs. Embedding + LSTM), summed, and decoded into a softmax over
    the vocabulary. Compiled with categorical cross-entropy and Adam.
    """
    dim_embedding = 64
    # image branch: project precomputed image features to a 256-d vector
    input_image = layers.Input(shape=(X_train_image.shape[1],))
    fimage = layers.Dense(256,activation='relu',name="ImageFeature")(input_image)
    ## sequence model
    input_text = layers.Input(shape=(max_length,))
    # mask_zero=True so the padding token (0) is ignored by the LSTM
    ftxt = layers.Embedding(vocabulary_size,dim_embedding, mask_zero=True)(input_text)
    ftxt = layers.LSTM(256,name="CaptionFeature")(ftxt)
    ## combined model for decoder
    decoder = layers.add([ftxt,fimage])
    decoder = layers.Dense(256,activation='relu')(decoder)
    output = layers.Dense(vocabulary_size,activation='softmax')(decoder)
    model_ = models.Model(inputs=[input_image, input_text],outputs=output)
    model_.compile(loss='categorical_crossentropy', optimizer='adam')
    return model_
def fit_model(model_,X_train_text, X_train_image, y_train_text, X_val_text, X_val_image, y_val_text):
    """Train the captioning model for 5 epochs, print elapsed minutes, and return the Keras History."""
    start = time.time()
    hist = model_.fit([X_train_image, X_train_text], y_train_text,
                    epochs=5, verbose=2,
                    batch_size=64,
                    validation_data=([X_val_image, X_val_text], y_val_text))
    end = time.time()
    print("TIME TOOK {:3.2f}MIN".format((end - start )/60))
    return hist
| [
"keras.preprocessing.sequence.pad_sequences",
"keras.layers.LSTM",
"keras.layers.add",
"keras.models.Model",
"time.time",
"keras.layers.Dense",
"numpy.array",
"keras.layers.Embedding",
"keras.layers.Input",
"keras.utils.to_categorical"
] | [((997, 1013), 'numpy.array', 'np.array', (['X_text'], {}), '(X_text)\n', (1005, 1013), True, 'import numpy as np\n'), ((1028, 1045), 'numpy.array', 'np.array', (['X_image'], {}), '(X_image)\n', (1036, 1045), True, 'import numpy as np\n'), ((1060, 1076), 'numpy.array', 'np.array', (['y_text'], {}), '(y_text)\n', (1068, 1076), True, 'import numpy as np\n'), ((1221, 1266), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(X_train_image.shape[1],)'}), '(shape=(X_train_image.shape[1],))\n', (1233, 1266), False, 'from keras import layers, models\n'), ((1388, 1421), 'keras.layers.Input', 'layers.Input', ([], {'shape': '(max_length,)'}), '(shape=(max_length,))\n', (1400, 1421), False, 'from keras import layers, models\n'), ((1613, 1639), 'keras.layers.add', 'layers.add', (['[ftxt, fimage]'], {}), '([ftxt, fimage])\n', (1623, 1639), False, 'from keras import layers, models\n'), ((1784, 1846), 'keras.models.Model', 'models.Model', ([], {'inputs': '[input_image, input_text]', 'outputs': 'output'}), '(inputs=[input_image, input_text], outputs=output)\n', (1796, 1846), False, 'from keras import layers, models\n'), ((2055, 2066), 'time.time', 'time.time', ([], {}), '()\n', (2064, 2066), False, 'import time\n'), ((2305, 2316), 'time.time', 'time.time', ([], {}), '()\n', (2314, 2316), False, 'import time\n'), ((1280, 1337), 'keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""', 'name': '"""ImageFeature"""'}), "(256, activation='relu', name='ImageFeature')\n", (1292, 1337), False, 'from keras import layers, models\n'), ((1433, 1497), 'keras.layers.Embedding', 'layers.Embedding', (['vocabulary_size', 'dim_embedding'], {'mask_zero': '(True)'}), '(vocabulary_size, dim_embedding, mask_zero=True)\n', (1449, 1497), False, 'from keras import layers, models\n'), ((1520, 1559), 'keras.layers.LSTM', 'layers.LSTM', (['(256)'], {'name': '"""CaptionFeature"""'}), "(256, name='CaptionFeature')\n", (1531, 1559), False, 'from keras import layers, models\n'), 
((1653, 1689), 'keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1665, 1689), False, 'from keras import layers, models\n'), ((1711, 1762), 'keras.layers.Dense', 'layers.Dense', (['vocabulary_size'], {'activation': '"""softmax"""'}), "(vocabulary_size, activation='softmax')\n", (1723, 1762), False, 'from keras import layers, models\n'), ((821, 874), 'keras.utils.to_categorical', 'to_categorical', (['out_text'], {'num_classes': 'vocabulary_size'}), '(out_text, num_classes=vocabulary_size)\n', (835, 874), False, 'from keras.utils import to_categorical\n'), ((745, 788), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[in_text]'], {'maxlen': 'max_length'}), '([in_text], maxlen=max_length)\n', (758, 788), False, 'from keras.preprocessing.sequence import pad_sequences\n')] |
"""Test state_distinguishability."""
import numpy as np
from toqito.state_opt import state_distinguishability
from toqito.states import basis, bell
def test_state_distinguishability_one_state():
    """A single density matrix is distinguishable with certainty."""
    psi = bell(0)
    dm = psi * psi.conj().T
    opt_val = state_distinguishability([dm])
    np.testing.assert_equal(np.isclose(opt_val, 1), True)
def test_state_distinguishability_one_state_vec():
    """A single state vector is distinguishable with certainty."""
    opt_val = state_distinguishability([bell(0)])
    np.testing.assert_equal(np.isclose(opt_val, 1), True)
def test_state_distinguishability_two_states():
    """Two orthogonal density matrices are perfectly distinguishable."""
    e_0, e_1 = basis(2, 0), basis(2, 1)
    proj_0 = e_0 * e_0.conj().T
    proj_1 = e_1 * e_1.conj().T
    opt_val = state_distinguishability([proj_0, proj_1], [1 / 2, 1 / 2])
    np.testing.assert_equal(np.isclose(opt_val, 1), True)
def test_unambiguous_state_distinguishability_two_states():
    """Unambiguous discrimination of two orthogonal projectors is perfect."""
    e_0, e_1 = basis(2, 0), basis(2, 1)
    proj_0 = e_0 * e_0.conj().T
    proj_1 = e_1 * e_1.conj().T
    opt_val = state_distinguishability(
        [proj_0, proj_1], [1 / 2, 1 / 2], dist_method="unambiguous"
    )
    np.testing.assert_equal(np.isclose(opt_val, 1), True)
def test_state_distinguishability_three_state_vec():
    """Orthogonal basis vectors are perfectly distinguishable."""
    vecs = [basis(2, 0), basis(2, 1)]
    opt_val = state_distinguishability(vecs, [1 / 2, 1 / 2])
    np.testing.assert_equal(np.isclose(opt_val, 1), True)
def test_state_distinguishability_yyd_density_matrices():
    """Global distinguishability of the YYD states should yield 1."""
    # Density matrices of the four Bell states.
    bells = [bell(i) * bell(i).conj().T for i in range(4)]
    states = [
        np.kron(bells[0], bells[0]),
        np.kron(bells[2], bells[1]),
        np.kron(bells[3], bells[1]),
        np.kron(bells[1], bells[1]),
    ]
    opt_val = state_distinguishability(states, [1 / 4] * 4)
    np.testing.assert_equal(np.isclose(opt_val, 1, atol=0.001), True)
def test_invalid_state_distinguishability_probs():
    """A probability vector that is not valid must raise ValueError."""
    with np.testing.assert_raises(ValueError):
        dm_0 = bell(0) * bell(0).conj().T
        dm_1 = bell(1) * bell(1).conj().T
        state_distinguishability([dm_0, dm_1], [1, 2, 3])
def test_invalid_state_distinguishability_states():
    """An empty list of states must raise ValueError."""
    with np.testing.assert_raises(ValueError):
        state_distinguishability([])
if __name__ == "__main__":
    # NOTE(review): ``np.testing.run_module_suite`` was deprecated and later
    # removed from NumPy -- running this module through pytest is the safer
    # entry point.  Confirm against the project's supported NumPy versions.
    np.testing.run_module_suite()
| [
"toqito.states.basis",
"numpy.testing.run_module_suite",
"numpy.testing.assert_raises",
"numpy.isclose",
"toqito.state_opt.state_distinguishability",
"toqito.states.bell",
"numpy.kron"
] | [((318, 350), 'toqito.state_opt.state_distinguishability', 'state_distinguishability', (['states'], {}), '(states)\n', (342, 350), False, 'from toqito.state_opt import state_distinguishability\n'), ((528, 535), 'toqito.states.bell', 'bell', (['(0)'], {}), '(0)\n', (532, 535), False, 'from toqito.states import basis, bell\n'), ((566, 598), 'toqito.state_opt.state_distinguishability', 'state_distinguishability', (['states'], {}), '(states)\n', (590, 598), False, 'from toqito.state_opt import state_distinguishability\n'), ((934, 973), 'toqito.state_opt.state_distinguishability', 'state_distinguishability', (['states', 'probs'], {}), '(states, probs)\n', (958, 973), False, 'from toqito.state_opt import state_distinguishability\n'), ((1333, 1399), 'toqito.state_opt.state_distinguishability', 'state_distinguishability', (['states', 'probs'], {'dist_method': '"""unambiguous"""'}), "(states, probs, dist_method='unambiguous')\n", (1357, 1399), False, 'from toqito.state_opt import state_distinguishability\n'), ((1669, 1708), 'toqito.state_opt.state_distinguishability', 'state_distinguishability', (['states', 'probs'], {}), '(states, probs)\n', (1693, 1708), False, 'from toqito.state_opt import state_distinguishability\n'), ((2235, 2274), 'toqito.state_opt.state_distinguishability', 'state_distinguishability', (['states', 'probs'], {}), '(states, probs)\n', (2259, 2274), False, 'from toqito.state_opt import state_distinguishability\n'), ((2934, 2963), 'numpy.testing.run_module_suite', 'np.testing.run_module_suite', ([], {}), '()\n', (2961, 2963), True, 'import numpy as np\n'), ((261, 268), 'toqito.states.bell', 'bell', (['(0)'], {}), '(0)\n', (265, 268), False, 'from toqito.states import basis, bell\n'), ((379, 397), 'numpy.isclose', 'np.isclose', (['res', '(1)'], {}), '(res, 1)\n', (389, 397), True, 'import numpy as np\n'), ((627, 645), 'numpy.isclose', 'np.isclose', (['res', '(1)'], {}), '(res, 1)\n', (637, 645), True, 'import numpy as np\n'), ((785, 796), 
'toqito.states.basis', 'basis', (['(2)', '(0)'], {}), '(2, 0)\n', (790, 796), False, 'from toqito.states import basis, bell\n'), ((798, 809), 'toqito.states.basis', 'basis', (['(2)', '(1)'], {}), '(2, 1)\n', (803, 809), False, 'from toqito.states import basis, bell\n'), ((1002, 1020), 'numpy.isclose', 'np.isclose', (['res', '(1)'], {}), '(res, 1)\n', (1012, 1020), True, 'import numpy as np\n'), ((1184, 1195), 'toqito.states.basis', 'basis', (['(2)', '(0)'], {}), '(2, 0)\n', (1189, 1195), False, 'from toqito.states import basis, bell\n'), ((1197, 1208), 'toqito.states.basis', 'basis', (['(2)', '(1)'], {}), '(2, 1)\n', (1202, 1208), False, 'from toqito.states import basis, bell\n'), ((1428, 1446), 'numpy.isclose', 'np.isclose', (['res', '(1)'], {}), '(res, 1)\n', (1438, 1446), True, 'import numpy as np\n'), ((1582, 1593), 'toqito.states.basis', 'basis', (['(2)', '(0)'], {}), '(2, 0)\n', (1587, 1593), False, 'from toqito.states import basis, bell\n'), ((1595, 1606), 'toqito.states.basis', 'basis', (['(2)', '(1)'], {}), '(2, 1)\n', (1600, 1606), False, 'from toqito.states import basis, bell\n'), ((1737, 1755), 'numpy.isclose', 'np.isclose', (['res', '(1)'], {}), '(res, 1)\n', (1747, 1755), True, 'import numpy as np\n'), ((1904, 1911), 'toqito.states.bell', 'bell', (['(0)'], {}), '(0)\n', (1908, 1911), False, 'from toqito.states import basis, bell\n'), ((1942, 1949), 'toqito.states.bell', 'bell', (['(1)'], {}), '(1)\n', (1946, 1949), False, 'from toqito.states import basis, bell\n'), ((1980, 1987), 'toqito.states.bell', 'bell', (['(2)'], {}), '(2)\n', (1984, 1987), False, 'from toqito.states import basis, bell\n'), ((2018, 2025), 'toqito.states.bell', 'bell', (['(3)'], {}), '(3)\n', (2022, 2025), False, 'from toqito.states import basis, bell\n'), ((2069, 2088), 'numpy.kron', 'np.kron', (['psi0', 'psi0'], {}), '(psi0, psi0)\n', (2076, 2088), True, 'import numpy as np\n'), ((2098, 2117), 'numpy.kron', 'np.kron', (['psi2', 'psi1'], {}), '(psi2, psi1)\n', (2105, 2117), 
True, 'import numpy as np\n'), ((2127, 2146), 'numpy.kron', 'np.kron', (['psi3', 'psi1'], {}), '(psi3, psi1)\n', (2134, 2146), True, 'import numpy as np\n'), ((2156, 2175), 'numpy.kron', 'np.kron', (['psi1', 'psi1'], {}), '(psi1, psi1)\n', (2163, 2175), True, 'import numpy as np\n'), ((2303, 2333), 'numpy.isclose', 'np.isclose', (['res', '(1)'], {'atol': '(0.001)'}), '(res, 1, atol=0.001)\n', (2313, 2333), True, 'import numpy as np\n'), ((2470, 2506), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (2494, 2506), True, 'import numpy as np\n'), ((2630, 2673), 'toqito.state_opt.state_distinguishability', 'state_distinguishability', (['states', '[1, 2, 3]'], {}), '(states, [1, 2, 3])\n', (2654, 2673), False, 'from toqito.state_opt import state_distinguishability\n'), ((2802, 2838), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['ValueError'], {}), '(ValueError)\n', (2826, 2838), True, 'import numpy as np\n'), ((2868, 2900), 'toqito.state_opt.state_distinguishability', 'state_distinguishability', (['states'], {}), '(states)\n', (2892, 2900), False, 'from toqito.state_opt import state_distinguishability\n'), ((2523, 2530), 'toqito.states.bell', 'bell', (['(0)'], {}), '(0)\n', (2527, 2530), False, 'from toqito.states import basis, bell\n'), ((2565, 2572), 'toqito.states.bell', 'bell', (['(1)'], {}), '(1)\n', (2569, 2572), False, 'from toqito.states import basis, bell\n'), ((271, 278), 'toqito.states.bell', 'bell', (['(0)'], {}), '(0)\n', (275, 278), False, 'from toqito.states import basis, bell\n'), ((1914, 1921), 'toqito.states.bell', 'bell', (['(0)'], {}), '(0)\n', (1918, 1921), False, 'from toqito.states import basis, bell\n'), ((1952, 1959), 'toqito.states.bell', 'bell', (['(1)'], {}), '(1)\n', (1956, 1959), False, 'from toqito.states import basis, bell\n'), ((1990, 1997), 'toqito.states.bell', 'bell', (['(2)'], {}), '(2)\n', (1994, 1997), False, 'from toqito.states import basis, bell\n'), ((2028, 2035), 
'toqito.states.bell', 'bell', (['(3)'], {}), '(3)\n', (2032, 2035), False, 'from toqito.states import basis, bell\n'), ((2533, 2540), 'toqito.states.bell', 'bell', (['(0)'], {}), '(0)\n', (2537, 2540), False, 'from toqito.states import basis, bell\n'), ((2575, 2582), 'toqito.states.bell', 'bell', (['(1)'], {}), '(1)\n', (2579, 2582), False, 'from toqito.states import basis, bell\n')] |
r"""
Solve Helmholtz equation in 2D with periodic bcs in one direction
and Dirichlet in the other
alpha u - \nabla^2 u = f,
Use Fourier basis for the periodic direction and Shen's Dirichlet basis for the
non-periodic direction.
The equation to solve is
alpha (u, v) - (\nabla^2 u, v) = (f, v)
"""
import sys
import os
import importlib
from sympy import symbols, cos, sin
import numpy as np
from mpi4py import MPI
from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, \
Array, Function, TensorProductSpace, dx
comm = MPI.COMM_WORLD

# Usage: python <script> <N> <family>, where N is the 1D resolution and
# family selects the basis family for the non-periodic direction.
assert len(sys.argv) == 3, "Call with two command-line arguments"
assert sys.argv[-1].lower() in ('legendre', 'chebyshev', 'jacobi')
# NOTE(review): int() either returns an int or raises ValueError, so this
# isinstance assert can never fail -- it only forces the conversion.
assert isinstance(int(sys.argv[-2]), int)

# Collect basis and solver from either Chebyshev or Legendre submodules
family = sys.argv[-1]
base = importlib.import_module('.'.join(('shenfun', family)))
Solver = base.la.Helmholtz

# Use sympy to compute a rhs, given an analytical solution
alpha = 2.
x, y = symbols("x,y", real=True)
# Manufactured solution: periodic in y; the (1 - x**2) factor makes it
# satisfy the homogeneous Dirichlet conditions at x = +/- 1.
ue = (cos(4*np.pi*x) + sin(2*y))*(1-x**2)
fe = alpha*ue - ue.diff(x, 2) - ue.diff(y, 2)

# Size of discretization
N = (int(sys.argv[-2]),)*2

# Shen Dirichlet basis in x, Fourier basis in the periodic y direction.
SD = FunctionSpace(N[0], family, bc=(0, 0), scaled=True)
K1 = FunctionSpace(N[1], 'F', dtype='d')
T = TensorProductSpace(comm, (SD, K1), axes=(0, 1))
u = TrialFunction(T)
v = TestFunction(T)

# Get f on quad points
fj = Array(T, buffer=fe)

# Compute right hand side of Poisson equation
f_hat = Function(T)
f_hat = inner(v, fj, output_array=f_hat)

# Get left hand side of Helmholtz equation
matrices = inner(v, alpha*u - div(grad(u)))

# Create Helmholtz linear algebra solver
H = Solver(*matrices)

# Solve and transform to real space
u_hat = Function(T)               # Solution in spectral space
u_hat = H(u_hat, f_hat)       # Solve
uq = Array(T)
uq = T.backward(u_hat, uq)

# Compare with analytical solution
uj = Array(T, buffer=ue)
print("Error=%2.16e" %(np.sqrt(dx(uj-uq)**2)))
assert np.allclose(uj, uq)

# Plot only in interactive runs; skipped under pytest.
if 'pytest' not in os.environ:
    import matplotlib.pyplot as plt
    plt.figure()
    X = T.local_mesh(True)  # With broadcasting=True the shape of X is local_shape, even though the number of datapoints are still the same as in 1D
    plt.contourf(X[0], X[1], uq)
    plt.colorbar()
    plt.figure()
    plt.contourf(X[0], X[1], uj)
    plt.colorbar()
    plt.figure()
    plt.contourf(X[0], X[1], uq-uj)
    plt.colorbar()
    plt.title('Error')
    plt.show()
| [
"matplotlib.pyplot.title",
"sympy.symbols",
"matplotlib.pyplot.show",
"sympy.sin",
"shenfun.inner",
"numpy.allclose",
"shenfun.Array",
"sympy.cos",
"shenfun.TrialFunction",
"matplotlib.pyplot.colorbar",
"shenfun.grad",
"matplotlib.pyplot.figure",
"shenfun.Function",
"matplotlib.pyplot.cont... | [((1015, 1040), 'sympy.symbols', 'symbols', (['"""x,y"""'], {'real': '(True)'}), "('x,y', real=True)\n", (1022, 1040), False, 'from sympy import symbols, cos, sin\n'), ((1188, 1239), 'shenfun.FunctionSpace', 'FunctionSpace', (['N[0]', 'family'], {'bc': '(0, 0)', 'scaled': '(True)'}), '(N[0], family, bc=(0, 0), scaled=True)\n', (1201, 1239), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1245, 1280), 'shenfun.FunctionSpace', 'FunctionSpace', (['N[1]', '"""F"""'], {'dtype': '"""d"""'}), "(N[1], 'F', dtype='d')\n", (1258, 1280), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1285, 1332), 'shenfun.TensorProductSpace', 'TensorProductSpace', (['comm', '(SD, K1)'], {'axes': '(0, 1)'}), '(comm, (SD, K1), axes=(0, 1))\n', (1303, 1332), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1337, 1353), 'shenfun.TrialFunction', 'TrialFunction', (['T'], {}), '(T)\n', (1350, 1353), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1358, 1373), 'shenfun.TestFunction', 'TestFunction', (['T'], {}), '(T)\n', (1370, 1373), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1403, 1422), 'shenfun.Array', 'Array', (['T'], {'buffer': 'fe'}), '(T, buffer=fe)\n', (1408, 1422), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1478, 1489), 'shenfun.Function', 'Function', (['T'], {}), '(T)\n', (1486, 1489), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1498, 
1530), 'shenfun.inner', 'inner', (['v', 'fj'], {'output_array': 'f_hat'}), '(v, fj, output_array=f_hat)\n', (1503, 1530), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1728, 1739), 'shenfun.Function', 'Function', (['T'], {}), '(T)\n', (1736, 1739), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1819, 1827), 'shenfun.Array', 'Array', (['T'], {}), '(T)\n', (1824, 1827), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1896, 1915), 'shenfun.Array', 'Array', (['T'], {'buffer': 'ue'}), '(T, buffer=ue)\n', (1901, 1915), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1970, 1989), 'numpy.allclose', 'np.allclose', (['uj', 'uq'], {}), '(uj, uq)\n', (1981, 1989), True, 'import numpy as np\n'), ((2062, 2074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2072, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2227, 2255), 'matplotlib.pyplot.contourf', 'plt.contourf', (['X[0]', 'X[1]', 'uq'], {}), '(X[0], X[1], uq)\n', (2239, 2255), True, 'import matplotlib.pyplot as plt\n'), ((2260, 2274), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2272, 2274), True, 'import matplotlib.pyplot as plt\n'), ((2280, 2292), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2290, 2292), True, 'import matplotlib.pyplot as plt\n'), ((2297, 2325), 'matplotlib.pyplot.contourf', 'plt.contourf', (['X[0]', 'X[1]', 'uj'], {}), '(X[0], X[1], uj)\n', (2309, 2325), True, 'import matplotlib.pyplot as plt\n'), ((2330, 2344), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2342, 2344), True, 'import matplotlib.pyplot as plt\n'), ((2350, 2362), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), 
'()\n', (2360, 2362), True, 'import matplotlib.pyplot as plt\n'), ((2367, 2400), 'matplotlib.pyplot.contourf', 'plt.contourf', (['X[0]', 'X[1]', '(uq - uj)'], {}), '(X[0], X[1], uq - uj)\n', (2379, 2400), True, 'import matplotlib.pyplot as plt\n'), ((2403, 2417), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2415, 2417), True, 'import matplotlib.pyplot as plt\n'), ((2422, 2440), 'matplotlib.pyplot.title', 'plt.title', (['"""Error"""'], {}), "('Error')\n", (2431, 2440), True, 'import matplotlib.pyplot as plt\n'), ((2446, 2456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2454, 2456), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1065), 'sympy.cos', 'cos', (['(4 * np.pi * x)'], {}), '(4 * np.pi * x)\n', (1050, 1065), False, 'from sympy import symbols, cos, sin\n'), ((1064, 1074), 'sympy.sin', 'sin', (['(2 * y)'], {}), '(2 * y)\n', (1067, 1074), False, 'from sympy import symbols, cos, sin\n'), ((1609, 1616), 'shenfun.grad', 'grad', (['u'], {}), '(u)\n', (1613, 1616), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n'), ((1947, 1958), 'shenfun.dx', 'dx', (['(uj - uq)'], {}), '(uj - uq)\n', (1949, 1958), False, 'from shenfun import inner, div, grad, TestFunction, TrialFunction, FunctionSpace, Array, Function, TensorProductSpace, dx\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import csv
import six
import zipfile
import numpy as np
# struct-style record layout for marisa_trie.RecordTrie; the five
# fixed-width byte fields line up with GAZETTEER_COLUMNS below.
GAZETTEER_FORMAT = "2s 1s 5s 2s 3s"
GAZETTEER_COLUMNS = ['country_code', 'feature_class', 'feature_code',
                     'admin1_code', 'admin2_code']

# Column layout of the raw GeoNames dump ("geoname" table), downloadable
# from http://download.geonames.org/export/dump/
_GEONAMES_COLUMNS = ['geonameid', 'main_name', 'asciiname', 'alternatenames',
                     'latitude', 'longitude', 'feature_class',
                     'feature_code', 'country_code', 'cc2',
                     'admin1_code', 'admin2_code', 'admin3_code', 'admin4_code',
                     'population', 'elevation', 'dem', 'timezone',
                     'modification_date']

# Code-like columns are forced to object dtype -- presumably so pandas
# keeps values such as "01" as strings instead of coercing to integers.
_GEONAMES_DTYPES = {
    'feature_class': object,
    'feature_code': object,
    'country_code': object,
    'admin1_code': object,
    'admin2_code': object,
    'admin3_code': object,
    'admin4_code': object,
}

# Keyword arguments used by read_geonames when calling pandas.read_csv.
_GEONAMES_PANDAS_PARAMS = dict(
    sep='\t',
    header=None,
    encoding='utf8',
    quoting=csv.QUOTE_NONE,
    names=_GEONAMES_COLUMNS,
    dtype=_GEONAMES_DTYPES
)
def to_marisa(df, columns=GAZETTEER_COLUMNS, format=GAZETTEER_FORMAT):
    """
    Encode a ``pandas.DataFrame`` with GeoNames data (as loaded by
    :func:`read_geonames`, possibly filtered) into a
    ``marisa_trie.RecordTrie`` keyed by place name.
    """
    import marisa_trie
    items = _iter_geonames_items(df, columns)
    return marisa_trie.RecordTrie(format, items)
def to_dawg(df, columns=None, format=None):
    """
    Encode a ``pandas.DataFrame`` with GeoNames data (as loaded by
    :func:`read_geonames`, possibly filtered) into a ``dawg.RecordDAWG``,
    or into a plain ``dawg.DAWG`` when both ``columns`` and ``format``
    are None.
    """
    import dawg
    if columns is not None:
        return dawg.RecordDAWG(format, _iter_geonames_items(df, columns))
    assert format is None
    names = iter(_split_names_into_rows(df).name)
    return dawg.CompletionDAWG(names)
def read_geonames(filename):
    """
    Load a GeoNames dump into a ``pandas.DataFrame``.

    The file may be downloaded from http://download.geonames.org/export/dump/;
    it should be unzipped and in the "geonames table" format.
    """
    import pandas as pd
    frame = pd.read_csv(filename, **_GEONAMES_PANDAS_PARAMS)
    return frame
def read_geonames_zipped(zip_filename, geonames_filename=None):
    """Parse a zipped geonames file."""
    if geonames_filename is None:
        # Default member name: "XX.zip" is expected to contain "XX.txt".
        _, base = os.path.split(zip_filename)
        geonames_filename = base.replace('.zip', '.txt')
    with zipfile.ZipFile(zip_filename, 'r') as zf:
        member = zf.open(geonames_filename)
        return read_geonames(member)
def _iter_geonames_items(df, columns):
    """Yield ``(name, [column_values_as_utf8])`` tuples, one per name."""
    expanded = _split_names_into_rows(df)
    for _, row in expanded.iterrows():
        values = [row[col] for col in columns]
        yield row['name'], _ensure_utf8(values)
def _joined_names_column(df):
    """
    Join the ``main_name``, ``asciiname`` and ``alternatenames`` columns
    into a single comma-separated column of unique names.
    """
    def _join(row):
        candidates = [row['main_name'], row['asciiname'], row['alternatenames']]
        # NaN is the only value that is not equal to itself, so ``n == n``
        # reliably filters missing values.  The previous identity check
        # (``n is not np.nan``) only caught the ``np.nan`` singleton object
        # and missed any other float NaN.
        names = set(six.text_type(n) for n in candidates if n and n == n)
        return ','.join(names)
    return df.apply(_join, axis=1)
def _split_names_into_rows(df):
    """
    Create a separate row for each alternate name (with other data duplicated).
    Delete 'main_name', 'asciiname' and 'alternatenames' columns and add
    a single 'name' column instead.
    """
    import pandas as pd
    # Each element is the list of all name variants for one geoname record.
    names = _joined_names_column(df).str.split(',')
    name_lenghts = names.map(len)
    # Repeat each original index once per name variant so that joining on
    # the index below duplicates the remaining columns for every name.
    idx = np.repeat(name_lenghts.index, name_lenghts.values)
    names_split = np.concatenate(names.values)
    names_s = pd.Series(names_split, index=idx)
    names_s.name = 'name'
    # One output row per (record, name) pair.
    df = df.join(names_s, )
    del df['main_name']
    del df['asciiname']
    del df['alternatenames']
    # Reorder so the new 'name' column becomes the second column.
    cols = df.columns.tolist()
    cols = cols[0:1] + cols[-1:] + cols[1:-1]
    df = df[cols]
    return df.reset_index()
def _ensure_utf8(lst):
return [v.encode('utf8') if not isinstance(v, float) else str(v)
for v in lst]
| [
"zipfile.ZipFile",
"pandas.read_csv",
"six.text_type",
"pandas.Series",
"os.path.split",
"numpy.concatenate",
"numpy.repeat"
] | [((2210, 2258), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename, **_GEONAMES_PANDAS_PARAMS)\n', (2221, 2258), True, 'import pandas as pd\n'), ((3584, 3634), 'numpy.repeat', 'np.repeat', (['name_lenghts.index', 'name_lenghts.values'], {}), '(name_lenghts.index, name_lenghts.values)\n', (3593, 3634), True, 'import numpy as np\n'), ((3654, 3682), 'numpy.concatenate', 'np.concatenate', (['names.values'], {}), '(names.values)\n', (3668, 3682), True, 'import numpy as np\n'), ((3697, 3730), 'pandas.Series', 'pd.Series', (['names_split'], {'index': 'idx'}), '(names_split, index=idx)\n', (3706, 3730), True, 'import pandas as pd\n'), ((2424, 2451), 'os.path.split', 'os.path.split', (['zip_filename'], {}), '(zip_filename)\n', (2437, 2451), False, 'import os\n'), ((2523, 2557), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_filename', '"""r"""'], {}), "(zip_filename, 'r')\n", (2538, 2557), False, 'import zipfile\n'), ((3055, 3071), 'six.text_type', 'six.text_type', (['n'], {}), '(n)\n', (3068, 3071), False, 'import six\n')] |
import os
import time
import numpy as np
import pandas as pd
from preprocessing.clean import CleanData
from svd.svd_algorithm import SVDAlgorithm
from error_measures.measures import *
from cur.cur_algorithm import *
from collaborative_filtering.collaborate import *
'''
Download the dataset; if it is already available, print that it exists.
'''
def format_dataset():
    """
    Clean the MovieLens 100k dump if it has been downloaded.

    Scans ``preprocessing/`` for an entry ending in ``ml-100k``; when found,
    runs :class:`CleanData` on the ratings file, otherwise reports that the
    dataset is missing.  The previous version re-listed the directory and
    called ``list.index`` on every iteration (O(n^2)) and printed nothing
    at all for an empty directory.
    """
    for entry in os.listdir('preprocessing/'):
        if str(entry).endswith('ml-100k'):
            print("Dataset exists.")
            cleaner = CleanData('preprocessing/ml-100k/ml-100k/u.data')
            cleaner.process()
            break
    else:
        # Runs only when the loop finished without hitting ``break``.
        print("Dataset doesn't exist. Rerun run.sh again.")
'''
Run collaborative filtering using the Collaborate class from the
collaborative_filtering.collaborate module.
'''
def run_collaborative_filtering(M):
    """Run plain collaborative filtering on a 50x50 slice of the ratings
    matrix ``M`` and print timing plus RMSE / top-K precision / Spearman
    correlation scores."""
    start = time.time()
    # NOTE(review): the slice is transposed here before ``Collaborate``
    # receives ``m.T`` (i.e. the original orientation), while the baseline
    # variant below passes the untransposed slice -- confirm which
    # orientation ``Collaborate`` actually expects; the two are inconsistent.
    m = M[300:350, 150:200].T
    cf = Collaborate(m.T)
    m_p = cf.fill()
    print("Collaborative Filtering Time: " +str(time.time() - start))
    print("RMSE Collaborative Filtering: " + str(rmse(m, m_p.T)))
    print("Top K precision Collaborative Filtering: " + str(top_k(40, m, m_p.T)))
    print("Spearman correlation Collaborative Filtering: " + str(spearman_correlation(m, m_p.T)))
'''
Run collaborative filtering baseline from function run_collaborative_filtering_baseline from the
collaborative_filtering.collaborate file
'''
def run_collaborative_filtering_baseline(M):
    """Run collaborative filtering with baseline estimates on a 50x50
    slice of the ratings matrix ``M`` and print timing plus RMSE / top-K
    precision / Spearman correlation scores."""
    start = time.time()
    # NOTE(review): unlike run_collaborative_filtering above, the slice is
    # NOT transposed before ``Collaborate(m.T)`` -- confirm which
    # orientation is intended.
    m = M[300:350, 150:200]
    cfb = Collaborate(m.T)
    m_p = cfb.fill(baseline=True)
    print("Collaborative Filtering with baseline Time: " +str(time.time() - start))
    print("RMSE Collaborative Filtering with baseline: " + str(rmse(m, m_p.T)))
    print("Top K precision Collaborative Filtering with baseline: " + str(top_k(40, m, m_p.T)))
    print("Spearman correlation Collaborative Filtering with baseline: " + str(spearman_correlation(m, m_p.T)))
'''
Run svd from function SVDAlgorithm from the
svd.svd_algorithm file
'''
def run_svd(M):
    """Full-rank SVD reconstruction of ``M`` with timing and error metrics."""
    solver = SVDAlgorithm()
    t0 = time.time()
    U, sigma, V = solver.svd(M, dimension_reduction=1.0)
    reconstruction = np.dot(np.dot(U, sigma), V)
    print("SVD Time: " + str(time.time() - t0))
    print("RMSE SVD: " + str(rmse(M, reconstruction)))
    print("Top K precision SVD: " + str(top_k(40, M, reconstruction)))
    print("Spearman correlation SVD: " + str(spearman_correlation(M, reconstruction)))
'''
Run svd with 90% retianed energy using function SVDAlgorithm from the
svd.svd_algorithm file
'''
def run_svd_reduce(M):
    """SVD reconstruction of ``M`` retaining 90% of the energy, with
    timing and error metrics."""
    solver = SVDAlgorithm()
    t0 = time.time()
    U, sigma, V = solver.svd(M, dimension_reduction=0.9)
    reconstruction = np.dot(np.dot(U, sigma), V)
    print("SVD Reduction Time: " + str(time.time() - t0))
    print("RMSE Reduction SVD: " + str(rmse(M, reconstruction)))
    print("Top K precision SVD Reduction: " + str(top_k(40, M, reconstruction)))
    print("Spearman correlation SVD Reduction: " + str(spearman_correlation(M, reconstruction)))
'''
Run cur from cur.cur_algorithm
'''
def run_cur(M):
    """CUR decomposition of ``M`` (600 columns/rows, no repeated picks)
    with timing and error metrics."""
    t0 = time.time()
    approx = cur(M, 600, 600, repeat=False)
    print("CUR Time: " + str(time.time() - t0))
    print("RMSE CUR: " + str(rmse(M, approx)))
    print("Top K precision CUR: " + str(top_k(40, M, approx)))
    print("Spearman correlation CUR: " + str(spearman_correlation(M, approx)))
'''
Run cur from cur.cur_algorithm to implement cur with 90% retained energy
'''
def run_cur_reduce(M):
    """CUR decomposition of ``M`` retaining 90% of the energy (repeated
    picks allowed) with timing and error metrics."""
    t0 = time.time()
    approx = cur(M, 600, 600, dim_red=0.9, repeat=True)
    print("CUR Reduction Time: " + str(time.time() - t0))
    print("RMSE Reduction CUR: " + str(rmse(M, approx)))
    print("Top K precision CUR Reduction: " + str(top_k(40, M, approx)))
    print("Spearman correlation CUR Reduction: " + str(spearman_correlation(M, approx)))
'''
Driver Code
'''
if __name__=="__main__":
formated_dataset = False
for files in os.listdir('.'):
if str(files).endswith('.npy') or str(files).endswith('.csv'):
print("Formatted dataset already exists.")
formated_dataset = True
break
if formated_dataset is False:
format_dataset()
M = np.load('data.npy')
run_collaborative_filtering(M)
run_collaborative_filtering_baseline(M)
run_svd(M)
run_svd_reduce(M)
run_cur(M)
run_cur_reduce(M)
| [
"numpy.load",
"time.time",
"svd.svd_algorithm.SVDAlgorithm",
"preprocessing.clean.CleanData",
"numpy.dot",
"os.listdir"
] | [((386, 414), 'os.listdir', 'os.listdir', (['"""preprocessing/"""'], {}), "('preprocessing/')\n", (396, 414), False, 'import os\n'), ((962, 973), 'time.time', 'time.time', ([], {}), '()\n', (971, 973), False, 'import time\n'), ((1579, 1590), 'time.time', 'time.time', ([], {}), '()\n', (1588, 1590), False, 'import time\n'), ((2161, 2175), 'svd.svd_algorithm.SVDAlgorithm', 'SVDAlgorithm', ([], {}), '()\n', (2173, 2175), False, 'from svd.svd_algorithm import SVDAlgorithm\n'), ((2192, 2203), 'time.time', 'time.time', ([], {}), '()\n', (2201, 2203), False, 'import time\n'), ((2670, 2684), 'svd.svd_algorithm.SVDAlgorithm', 'SVDAlgorithm', ([], {}), '()\n', (2682, 2684), False, 'from svd.svd_algorithm import SVDAlgorithm\n'), ((2708, 2719), 'time.time', 'time.time', ([], {}), '()\n', (2717, 2719), False, 'import time\n'), ((3167, 3178), 'time.time', 'time.time', ([], {}), '()\n', (3176, 3178), False, 'import time\n'), ((3586, 3597), 'time.time', 'time.time', ([], {}), '()\n', (3595, 3597), False, 'import time\n'), ((4028, 4043), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (4038, 4043), False, 'import os\n'), ((4292, 4311), 'numpy.load', 'np.load', (['"""data.npy"""'], {}), "('data.npy')\n", (4299, 4311), True, 'import numpy as np\n'), ((2273, 2289), 'numpy.dot', 'np.dot', (['U', 'sigma'], {}), '(U, sigma)\n', (2279, 2289), True, 'import numpy as np\n'), ((2789, 2805), 'numpy.dot', 'np.dot', (['U', 'sigma'], {}), '(U, sigma)\n', (2795, 2805), True, 'import numpy as np\n'), ((517, 566), 'preprocessing.clean.CleanData', 'CleanData', (['"""preprocessing/ml-100k/ml-100k/u.data"""'], {}), "('preprocessing/ml-100k/ml-100k/u.data')\n", (526, 566), False, 'from preprocessing.clean import CleanData\n'), ((1098, 1109), 'time.time', 'time.time', ([], {}), '()\n', (1107, 1109), False, 'import time\n'), ((1742, 1753), 'time.time', 'time.time', ([], {}), '()\n', (1751, 1753), False, 'import time\n'), ((2322, 2333), 'time.time', 'time.time', ([], {}), '()\n', (2331, 
2333), False, 'import time\n'), ((2848, 2859), 'time.time', 'time.time', ([], {}), '()\n', (2857, 2859), False, 'import time\n'), ((3248, 3259), 'time.time', 'time.time', ([], {}), '()\n', (3257, 3259), False, 'import time\n'), ((3689, 3700), 'time.time', 'time.time', ([], {}), '()\n', (3698, 3700), False, 'import time\n'), ((628, 656), 'os.listdir', 'os.listdir', (['"""preprocessing/"""'], {}), "('preprocessing/')\n", (638, 656), False, 'import os\n'), ((676, 704), 'os.listdir', 'os.listdir', (['"""preprocessing/"""'], {}), "('preprocessing/')\n", (686, 704), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
def py_cpu_nms(dets, thresh):
    """Pure-NumPy non-maximum suppression.

    ``dets`` rows are ``[x1, y1, x2, y2, score]``; boxes whose IoU with a
    higher-scoring kept box exceeds ``thresh`` are suppressed.  Returns the
    indices of the kept boxes, in decreasing score order.
    """
    xs1 = np.ascontiguousarray(dets[:, 0])
    ys1 = np.ascontiguousarray(dets[:, 1])
    xs2 = np.ascontiguousarray(dets[:, 2])
    ys2 = np.ascontiguousarray(dets[:, 3])
    box_areas = (xs2 - xs1) * (ys2 - ys1)

    # Candidate indices sorted by descending score.
    remaining = dets[:, 4].argsort()[::-1]
    keep = []
    while remaining.size > 0:
        best = remaining[0]
        keep.append(best)
        rest = remaining[1:]

        # Intersection rectangle of the best box with every other candidate.
        ix1 = np.maximum(xs1[best], xs1[rest])
        iy1 = np.maximum(ys1[best], ys1[rest])
        ix2 = np.minimum(xs2[best], xs2[rest])
        iy2 = np.minimum(ys2[best], ys2[rest])
        inter = np.maximum(ix2 - ix1, 0) * np.maximum(iy2 - iy1, 0)

        iou = inter / np.maximum(box_areas[best] + box_areas[rest] - inter, 1e-5)
        remaining = rest[iou <= thresh]
    return keep
| [
"numpy.minimum",
"numpy.ascontiguousarray",
"numpy.maximum"
] | [((437, 469), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['dets[:, 0]'], {}), '(dets[:, 0])\n', (457, 469), True, 'import numpy as np\n'), ((479, 511), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['dets[:, 1]'], {}), '(dets[:, 1])\n', (499, 511), True, 'import numpy as np\n'), ((521, 553), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['dets[:, 2]'], {}), '(dets[:, 2])\n', (541, 553), True, 'import numpy as np\n'), ((563, 595), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['dets[:, 3]'], {}), '(dets[:, 3])\n', (583, 595), True, 'import numpy as np\n'), ((814, 849), 'numpy.maximum', 'np.maximum', (['x1[pick_idx]', 'x1[order]'], {}), '(x1[pick_idx], x1[order])\n', (824, 849), True, 'import numpy as np\n'), ((864, 899), 'numpy.maximum', 'np.maximum', (['y1[pick_idx]', 'y1[order]'], {}), '(y1[pick_idx], y1[order])\n', (874, 899), True, 'import numpy as np\n'), ((914, 949), 'numpy.minimum', 'np.minimum', (['x2[pick_idx]', 'x2[order]'], {}), '(x2[pick_idx], x2[order])\n', (924, 949), True, 'import numpy as np\n'), ((964, 999), 'numpy.minimum', 'np.minimum', (['y2[pick_idx]', 'y2[order]'], {}), '(y2[pick_idx], y2[order])\n', (974, 999), True, 'import numpy as np\n'), ((1017, 1041), 'numpy.maximum', 'np.maximum', (['(xx2 - xx1)', '(0)'], {}), '(xx2 - xx1, 0)\n', (1027, 1041), True, 'import numpy as np\n'), ((1044, 1068), 'numpy.maximum', 'np.maximum', (['(yy2 - yy1)', '(0)'], {}), '(yy2 - yy1, 0)\n', (1054, 1068), True, 'import numpy as np\n'), ((1091, 1148), 'numpy.maximum', 'np.maximum', (['(areas[pick_idx] + areas[order] - inter)', '(1e-05)'], {}), '(areas[pick_idx] + areas[order] - inter, 1e-05)\n', (1101, 1148), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""!
Author: <NAME> - ASCEE
Description: Designs octave band FIR filters from 16Hz to 16 kHz for a sampling
frequency of 48 kHz.
"""
# from asceefigs.plot import Bode, close, Figure
__all__ = ['freqResponse', 'bandpass_fir_design', 'lowpass_fir_design',
'arbitrary_fir_design']
import numpy as np
from scipy.signal import freqz, hann, firwin2
def freqResponse(fs, freq, coefs_b, coefs_a=1.):
    """
    Evaluate the complex frequency response of a digital filter.

    Args:
        fs: Sampling frequency [Hz]
        freq: Array of frequencies to compute the response for
        coefs_b: Forward coefficients (FIR coefficients)
        coefs_a: Feedback coefficients (IIR); defaults to 1 for pure FIR

    Returns:
        Complex frequency response evaluated at every entry of ``freq``
    """
    # Map physical frequencies [Hz] to normalized angular frequencies [rad/sample].
    omega = 2 * np.pi * freq / fs
    _, response = freqz(coefs_b, coefs_a, worN=omega)
    return response
def bandpass_fir_design(L, fs, fl, fu, window=hann):
    """
    Design a linear-phase band-pass FIR filter by windowing the ideal
    (sinc-based) band-pass impulse response.

    Args:
        L: Number of filter taps.
        fs: Sampling frequency [Hz].
        fl: Lower cut-off frequency [Hz].
        fu: Upper cut-off frequency [Hz].
        window: Window function, called as ``window(L, True)``.

    Returns:
        Array of L windowed FIR coefficients.
    """
    assert fs/2 > fu, "Nyquist frequency needs to be higher than upper cut-off"
    # BUG FIX: the original message wrongly mentioned the Nyquist frequency.
    assert fu > fl, "Upper cut-off needs to be higher than lower cut-off"
    Omg2 = 2*np.pi*fu/fs
    Omg1 = 2*np.pi*fl/fs

    # Ideal band-pass impulse response, centered at tap L//2.
    # BUG FIX: start from zeros instead of np.empty so taps the loop does not
    # reach stay defined (the original left fir[0], and fir[L-1] for odd L,
    # as uninitialized memory).
    fir = np.zeros(L, dtype=float)
    fir[L//2] = (Omg2 - Omg1)/np.pi

    # range(1, (L+1)//2) fills every remaining tap for odd L; for even L the
    # first tap stays 0 because its mirror image would fall outside the array.
    for n in range(1, (L + 1)//2):
        tap = (np.sin(n*Omg2) - np.sin(n*Omg1))/(n*np.pi)
        fir[L//2 + n] = tap
        fir[L//2 - n] = tap

    win = window(L, True)
    return fir*win
def lowpass_fir_design(L, fs, fc, window=hann):
    """
    Design a linear-phase low-pass FIR filter by windowing the ideal
    (sinc-based) low-pass impulse response.

    Args:
        L: Number of filter taps.
        fs: Sampling frequency [Hz].
        fc: Cut-off frequency [Hz].
        window: Window function, called as ``window(L, True)``.

    Returns:
        Array of L windowed FIR coefficients.
    """
    assert fs/2 > fc, "Nyquist frequency needs to be higher" \
                      " than upper cut-off"
    Omgc = 2*np.pi*fc/fs

    # Ideal low-pass impulse response, centered at tap L//2.
    # BUG FIX: start from zeros instead of np.empty so taps the loop does not
    # reach stay defined (the original left fir[0], and fir[L-1] for odd L,
    # as uninitialized memory).
    fir = np.zeros(L, dtype=float)
    fir[L//2] = Omgc/np.pi

    # range(1, (L+1)//2) fills every remaining tap for odd L; for even L the
    # first tap stays 0 because its mirror image would fall outside the array.
    for n in range(1, (L + 1)//2):
        tap = np.sin(n*Omgc)/(n*np.pi)
        fir[L//2 + n] = tap
        fir[L//2 - n] = tap

    win = window(L, True)
    return fir*win
def arbitrary_fir_design(fs, L, freq, amps, window='hann'):
    """
    Design a linear-phase FIR filter with an arbitrary magnitude response.

    The last entry of ``freq`` should equal ``fs/2`` (the Nyquist frequency).

    Args:
        fs: Sampling frequency [Hz].
        L: Number of filter taps.
        freq: Frequency sample points [Hz], ascending, ending at fs/2.
        amps: Desired gain at each point of ``freq``.
        window: Window passed on to ``scipy.signal.firwin2``.

    Returns:
        Array of L FIR coefficients.
    """
    taps = firwin2(L, freq, amps, fs=fs, window=window)
    return taps
| [
"numpy.sin",
"numpy.empty",
"scipy.signal.freqz",
"scipy.signal.firwin2"
] | [((892, 925), 'scipy.signal.freqz', 'freqz', (['coefs_b', 'coefs_a'], {'worN': 'Omg'}), '(coefs_b, coefs_a, worN=Omg)\n', (897, 925), False, 'from scipy.signal import freqz, hann, firwin2\n'), ((1250, 1274), 'numpy.empty', 'np.empty', (['L'], {'dtype': 'float'}), '(L, dtype=float)\n', (1258, 1274), True, 'import numpy as np\n'), ((1772, 1796), 'numpy.empty', 'np.empty', (['L'], {'dtype': 'float'}), '(L, dtype=float)\n', (1780, 1796), True, 'import numpy as np\n'), ((2191, 2235), 'scipy.signal.firwin2', 'firwin2', (['L', 'freq', 'amps'], {'fs': 'fs', 'window': 'window'}), '(L, freq, amps, fs=fs, window=window)\n', (2198, 2235), False, 'from scipy.signal import freqz, hann, firwin2\n'), ((1919, 1935), 'numpy.sin', 'np.sin', (['(n * Omgc)'], {}), '(n * Omgc)\n', (1925, 1935), True, 'import numpy as np\n'), ((1966, 1982), 'numpy.sin', 'np.sin', (['(n * Omgc)'], {}), '(n * Omgc)\n', (1972, 1982), True, 'import numpy as np\n'), ((1405, 1421), 'numpy.sin', 'np.sin', (['(n * Omg2)'], {}), '(n * Omg2)\n', (1411, 1421), True, 'import numpy as np\n'), ((1420, 1436), 'numpy.sin', 'np.sin', (['(n * Omg1)'], {}), '(n * Omg1)\n', (1426, 1436), True, 'import numpy as np\n'), ((1469, 1485), 'numpy.sin', 'np.sin', (['(n * Omg2)'], {}), '(n * Omg2)\n', (1475, 1485), True, 'import numpy as np\n'), ((1484, 1500), 'numpy.sin', 'np.sin', (['(n * Omg1)'], {}), '(n * Omg1)\n', (1490, 1500), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math
def autolabel_user_1(rects):
    """Annotate each bar of *rects* with its height, rounded to 3 decimals.

    The vertical offset factor shrinks with the bar index so that labels on
    later (taller) bars sit proportionally closer to the bar top.
    """
    # Per-index offset factor; bars beyond the third all use 1.02.
    # (Replaces four near-identical if/elif branches that differed only here.)
    offset_factors = {0: 1.08, 1: 1.05, 2: 1.03}
    for i, rect in enumerate(rects):
        height = rect.get_height()
        factor = offset_factors.get(i, 1.02)
        plt.text(rect.get_x() + rect.get_width() / 2., factor * height,
                 "%s" % round(height, 3), ha='center')
def autolabel_user_2(rects):
    """Annotate each bar of *rects* with its height, rounded to 3 decimals.

    Every label is placed at 1.15x the bar height. The original branched on
    the bar index, but all four branches used the same 1.15 factor, so the
    dead branching has been removed without changing behavior.
    """
    for rect in rects:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width() / 2., 1.15 * height,
                 "%s" % round(height, 3), ha='center')
def log(list_name):
    """Replace every entry of *list_name* with its base-10 logarithm, in place.

    Each converted value is printed as it is computed; the mutated list is
    also returned for convenience.
    """
    for idx, value in enumerate(list_name):
        converted = math.log10(value)
        list_name[idx] = converted
        print(converted)
    return list_name
def ave(list_name):
    """Divide every entry of *list_name* by 900, in place.

    Each scaled value is printed as it is computed; the mutated list is
    also returned for convenience.
    """
    for idx, value in enumerate(list_name):
        scaled = value / 900
        list_name[idx] = scaled
        print(scaled)
    return list_name
# Build a grouped bar chart comparing average overall cost of the Cloud
# baseline vs. the EATP/EaOP scheme for 2/4/6/8 cameras.
size = 4
x = np.arange(size)
transmission_cloud = [9.980, 17.256, 30.162, 41.513]  # transmission time for 2/4/6/8 video segments (s)
video_file_cloud = [86.11, 176.35, 263.38, 395.42]  # cloud processing time for 2/4/6/8 cameras, 60 s of video (s)
prediction_EaOP = [0.045, 0.076, 0.116, 0.165]  # time for cloud/edge to predict on the 60 s video, run directly once the data arrives
cloud = [(x + y + z) / 180 for x, y, z in zip(transmission_cloud, video_file_cloud, prediction_EaOP)]
print(cloud)
# cloud = log(cloud)
transmission_EaOP = [0.061, 0.135, 0.192, 0.280]
computation_EaOP = [60.837, 60.837, 60.837, 60.837]  # computation time on a single edge node (s)
EaOP = [(x + y + z) / 180 for x, y, z in zip(transmission_EaOP, computation_EaOP, prediction_EaOP)]
print(EaOP)
# EaOP = log(EaOP)
error = [0.03, 0.032, 0.034, 0.038]  # per-bar error-bar magnitudes (NOTE(review): the original comment described a constant 0.004 list with range [-0.003, 0.003]; it was stale w.r.t. these values)
total_width, n = 0.8, 3
width = total_width / n
x = x - (total_width - width) / 2
plt.xlabel('Total Camera Numbers', fontsize=16)
plt.ylabel('Average Overall Cost (s)', fontsize=16)
rect1 = plt.bar(x - 0.45 * width, cloud, fc='#00A8A8', edgecolor="k", hatch="\\\\\\", yerr=error, width=0.75 * width,
                capsize=8, label='Cloud', zorder=1.8)
rect2 = plt.bar(x + 0.45 * width, EaOP, fc='#730000', edgecolor="k", hatch="xxx", yerr=error, width=0.75 * width,
                capsize=8, label='EATP', zorder=1.8)
plt.xticks(x, (2, 4, 6, 8), fontsize=14)
plt.yticks(fontsize=14)
plt.legend(loc='upper left', fontsize=12)
plt.grid(axis="y", zorder=0.5)  # draw the grid; smaller zorder sits further back, bars at 1.8 just avoid covering the error bars
autolabel_user_1(rect1)
autolabel_user_2(rect2)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.legend",
"math.log10",
"numpy.arange",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((1634, 1649), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (1643, 1649), True, 'import numpy as np\n'), ((2434, 2481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Total Camera Numbers"""'], {'fontsize': '(16)'}), "('Total Camera Numbers', fontsize=16)\n", (2444, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2482, 2533), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average Overall Cost (s)"""'], {'fontsize': '(16)'}), "('Average Overall Cost (s)', fontsize=16)\n", (2492, 2533), True, 'import matplotlib.pyplot as plt\n'), ((2543, 2699), 'matplotlib.pyplot.bar', 'plt.bar', (['(x - 0.45 * width)', 'cloud'], {'fc': '"""#00A8A8"""', 'edgecolor': '"""k"""', 'hatch': '"""\\\\\\\\\\\\"""', 'yerr': 'error', 'width': '(0.75 * width)', 'capsize': '(8)', 'label': '"""Cloud"""', 'zorder': '(1.8)'}), "(x - 0.45 * width, cloud, fc='#00A8A8', edgecolor='k', hatch=\n '\\\\\\\\\\\\', yerr=error, width=0.75 * width, capsize=8, label='Cloud',\n zorder=1.8)\n", (2550, 2699), True, 'import matplotlib.pyplot as plt\n'), ((2715, 2861), 'matplotlib.pyplot.bar', 'plt.bar', (['(x + 0.45 * width)', 'EaOP'], {'fc': '"""#730000"""', 'edgecolor': '"""k"""', 'hatch': '"""xxx"""', 'yerr': 'error', 'width': '(0.75 * width)', 'capsize': '(8)', 'label': '"""EATP"""', 'zorder': '(1.8)'}), "(x + 0.45 * width, EaOP, fc='#730000', edgecolor='k', hatch='xxx',\n yerr=error, width=0.75 * width, capsize=8, label='EATP', zorder=1.8)\n", (2722, 2861), True, 'import matplotlib.pyplot as plt\n'), ((2875, 2915), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', '(2, 4, 6, 8)'], {'fontsize': '(14)'}), '(x, (2, 4, 6, 8), fontsize=14)\n', (2885, 2915), True, 'import matplotlib.pyplot as plt\n'), ((2916, 2939), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (2926, 2939), True, 'import matplotlib.pyplot as plt\n'), ((2940, 2981), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""', 'fontsize': '(12)'}), "(loc='upper left', 
fontsize=12)\n", (2950, 2981), True, 'import matplotlib.pyplot as plt\n'), ((2982, 3012), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""y"""', 'zorder': '(0.5)'}), "(axis='y', zorder=0.5)\n", (2990, 3012), True, 'import matplotlib.pyplot as plt\n'), ((3102, 3112), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3110, 3112), True, 'import matplotlib.pyplot as plt\n'), ((1396, 1420), 'math.log10', 'math.log10', (['list_name[i]'], {}), '(list_name[i])\n', (1406, 1420), False, 'import math\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 04 11:12:45 2016
@author: <NAME>
"""
from __future__ import division, print_function, absolute_import, unicode_literals
from os import path, remove # File Path formatting
import numpy as np # For array operations
from scipy.io.matlab import loadmat # To load parameters stored in Matlab .mat file
import h5py
from sidpy.sid import Translator
from sidpy.hdf.hdf_utils import write_simple_attrs, link_h5_objects_as_attrs
from pyUSID.io.write_utils import VALUES_DTYPE, Dimension
from pyUSID.io.hdf_utils import create_indexed_group, write_main_dataset
from .df_utils.gmode_utils import readGmodeParms
class GDMTranslator(Translator):
    """
    Translates G-mode w^2 datasets from .mat files to .h5
    """

    def _read_data(self):
        # Unused abstract-method stub; all work happens in translate().
        pass

    def _parse_file_path(self, input_path):
        # Unused abstract-method stub; all work happens in translate().
        pass

    def translate(self, parm_path):
        """
        Basic method that translates .mat data files to a single .h5 file

        Parameters
        ------------
        parm_path : string / unicode
            Absolute file path of the parameters .mat file.

        Returns
        ----------
        h5_path : string / unicode
            Absolute path of the translated h5 file
        """
        self.parm_path = path.abspath(parm_path)
        (folder_path, file_name) = path.split(parm_path)
        (file_name, base_name) = path.split(folder_path)
        h5_path = path.join(folder_path, base_name + '.h5')

        # Read parameters
        parm_dict = readGmodeParms(parm_path)

        # Add the w^2 specific parameters to this list
        parm_data = loadmat(parm_path, squeeze_me=True, struct_as_record=True)
        freq_sweep_parms = parm_data['freqSweepParms']
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented drop-in replacement.
        parm_dict['freq_sweep_delay'] = float(freq_sweep_parms['delay'].item())
        gen_sig = parm_data['genSig']
        parm_dict['wfm_fix_d_fast'] = np.int32(gen_sig['restrictT'].item())
        freq_array = np.float32(parm_data['freqArray'])

        # prepare and write spectroscopic values
        samp_rate = parm_dict['IO_down_samp_rate_[Hz]']
        num_bins = int(parm_dict['wfm_n_cycles'] * parm_dict['wfm_p_slow'] * samp_rate)

        w_vec = np.arange(-0.5 * samp_rate, 0.5 * samp_rate, np.float32(samp_rate / num_bins))

        # NOTE: the original also built spec_val_mat / spec_ind_mat here via
        # np.tile / np.repeat, but neither was ever written to the file or
        # used below, so that dead computation has been removed.

        num_rows = parm_dict['grid_num_rows']
        num_cols = parm_dict['grid_num_cols']
        parm_dict['data_type'] = 'GmodeW2'

        num_pix = num_rows * num_cols

        # Top-level (file) attributes describing the grid and experiment state.
        global_parms = dict()
        global_parms['grid_size_x'] = parm_dict['grid_num_cols']
        global_parms['grid_size_y'] = parm_dict['grid_num_rows']
        # assuming that the experiment was completed:
        global_parms['current_position_x'] = parm_dict['grid_num_cols'] - 1
        global_parms['current_position_y'] = parm_dict['grid_num_rows'] - 1
        global_parms['data_type'] = parm_dict['data_type']  # self.__class__.__name__
        global_parms['translator'] = 'W2'

        # Now start creating datasets and populating:
        if path.exists(h5_path):
            remove(h5_path)

        h5_f = h5py.File(h5_path, 'w')
        write_simple_attrs(h5_f, global_parms)

        meas_grp = create_indexed_group(h5_f, 'Measurement')
        chan_grp = create_indexed_group(meas_grp, 'Channel')
        write_simple_attrs(chan_grp, parm_dict)

        pos_dims = [Dimension('X', 'nm', num_rows),
                    Dimension('Y', 'nm', num_cols)]
        spec_dims = [Dimension('Response Bin', 'a.u.', num_bins),
                     Dimension('Excitation Frequency ', 'Hz', len(freq_array))]

        # Minimize file size to the extent possible.
        # DAQs are rated at 16 bit so float16 should be most appropriate.
        # For some reason, compression is more effective on time series data
        h5_main = write_main_dataset(chan_grp, (num_pix, num_bins), 'Raw_Data',
                                     'Deflection', 'V',
                                     pos_dims, spec_dims,
                                     chunks=(1, num_bins), dtype=np.float32)

        # BUG FIX: h5py's create_dataset() treats its second positional
        # argument as the dataset *shape*; arrays must go through data=.
        h5_ex_freqs = chan_grp.create_dataset('Excitation_Frequencies', data=freq_array)
        h5_bin_freq = chan_grp.create_dataset('Bin_Frequencies', data=w_vec)

        # Now doing link_h5_objects_as_attrs:
        link_h5_objects_as_attrs(h5_main, [h5_ex_freqs, h5_bin_freq])

        # Now read the raw data files:
        pos_ind = 0
        for row_ind in range(1, num_rows + 1):
            for col_ind in range(1, num_cols + 1):
                file_path = path.join(folder_path, 'fSweep_r' + str(row_ind) + '_c' + str(col_ind) + '.mat')
                print('Working on row {} col {}'.format(row_ind, col_ind))
                if path.exists(file_path):
                    # Load data file
                    pix_data = loadmat(file_path, squeeze_me=True)
                    pix_mat = pix_data['AI_mat']
                    # Take the inverse FFT on 2nd dimension
                    pix_mat = np.fft.ifft(np.fft.ifftshift(pix_mat, axes=1), axis=1)
                    # Verified with Matlab - no conjugate required here.
                    pix_vec = pix_mat.transpose().reshape(pix_mat.size)
                    h5_main[pos_ind, :] = np.float32(pix_vec)
                    h5_f.flush()  # flush from memory!
                else:
                    print('File not found for: row {} col {}'.format(row_ind, col_ind))
                pos_ind += 1
                if (100.0 * pos_ind / num_pix) % 10 == 0:
                    print('completed translating {} %'.format(int(100 * pos_ind / num_pix)))

        h5_f.close()

        return h5_path
| [
"sidpy.hdf.hdf_utils.write_simple_attrs",
"os.path.abspath",
"h5py.File",
"os.remove",
"numpy.fft.ifftshift",
"scipy.io.matlab.loadmat",
"sidpy.hdf.hdf_utils.link_h5_objects_as_attrs",
"pyUSID.io.hdf_utils.write_main_dataset",
"numpy.float32",
"pyUSID.io.write_utils.Dimension",
"os.path.exists",... | [((1305, 1328), 'os.path.abspath', 'path.abspath', (['parm_path'], {}), '(parm_path)\n', (1317, 1328), False, 'from os import path, remove\n'), ((1364, 1385), 'os.path.split', 'path.split', (['parm_path'], {}), '(parm_path)\n', (1374, 1385), False, 'from os import path, remove\n'), ((1419, 1442), 'os.path.split', 'path.split', (['folder_path'], {}), '(folder_path)\n', (1429, 1442), False, 'from os import path, remove\n'), ((1461, 1502), 'os.path.join', 'path.join', (['folder_path', "(base_name + '.h5')"], {}), "(folder_path, base_name + '.h5')\n", (1470, 1502), False, 'from os import path, remove\n'), ((1652, 1710), 'scipy.io.matlab.loadmat', 'loadmat', (['parm_path'], {'squeeze_me': '(True)', 'struct_as_record': '(True)'}), '(parm_path, squeeze_me=True, struct_as_record=True)\n', (1659, 1710), False, 'from scipy.io.matlab import loadmat\n'), ((1984, 2018), 'numpy.float32', 'np.float32', (["parm_data['freqArray']"], {}), "(parm_data['freqArray'])\n", (1994, 2018), True, 'import numpy as np\n'), ((2595, 2626), 'numpy.repeat', 'np.repeat', (['freq_array', 'num_bins'], {}), '(freq_array, num_bins)\n', (2604, 2626), True, 'import numpy as np\n'), ((3597, 3617), 'os.path.exists', 'path.exists', (['h5_path'], {}), '(h5_path)\n', (3608, 3617), False, 'from os import path, remove\n'), ((3663, 3686), 'h5py.File', 'h5py.File', (['h5_path', '"""w"""'], {}), "(h5_path, 'w')\n", (3672, 3686), False, 'import h5py\n'), ((3695, 3733), 'sidpy.hdf.hdf_utils.write_simple_attrs', 'write_simple_attrs', (['h5_f', 'global_parms'], {}), '(h5_f, global_parms)\n', (3713, 3733), False, 'from sidpy.hdf.hdf_utils import write_simple_attrs, link_h5_objects_as_attrs\n'), ((3754, 3795), 'pyUSID.io.hdf_utils.create_indexed_group', 'create_indexed_group', (['h5_f', '"""Measurement"""'], {}), "(h5_f, 'Measurement')\n", (3774, 3795), False, 'from pyUSID.io.hdf_utils import create_indexed_group, write_main_dataset\n'), ((3815, 3856), 'pyUSID.io.hdf_utils.create_indexed_group', 
'create_indexed_group', (['meas_grp', '"""Channel"""'], {}), "(meas_grp, 'Channel')\n", (3835, 3856), False, 'from pyUSID.io.hdf_utils import create_indexed_group, write_main_dataset\n'), ((3865, 3904), 'sidpy.hdf.hdf_utils.write_simple_attrs', 'write_simple_attrs', (['chan_grp', 'parm_dict'], {}), '(chan_grp, parm_dict)\n', (3883, 3904), False, 'from sidpy.hdf.hdf_utils import write_simple_attrs, link_h5_objects_as_attrs\n'), ((4381, 4526), 'pyUSID.io.hdf_utils.write_main_dataset', 'write_main_dataset', (['chan_grp', '(num_pix, num_bins)', '"""Raw_Data"""', '"""Deflection"""', '"""V"""', 'pos_dims', 'spec_dims'], {'chunks': '(1, num_bins)', 'dtype': 'np.float32'}), "(chan_grp, (num_pix, num_bins), 'Raw_Data', 'Deflection',\n 'V', pos_dims, spec_dims, chunks=(1, num_bins), dtype=np.float32)\n", (4399, 4526), False, 'from pyUSID.io.hdf_utils import create_indexed_group, write_main_dataset\n'), ((4846, 4907), 'sidpy.hdf.hdf_utils.link_h5_objects_as_attrs', 'link_h5_objects_as_attrs', (['h5_main', '[h5_ex_freqs, h5_bin_freq]'], {}), '(h5_main, [h5_ex_freqs, h5_bin_freq])\n', (4870, 4907), False, 'from sidpy.hdf.hdf_utils import write_simple_attrs, link_h5_objects_as_attrs\n'), ((2275, 2307), 'numpy.float32', 'np.float32', (['(samp_rate / num_bins)'], {}), '(samp_rate / num_bins)\n', (2285, 2307), True, 'import numpy as np\n'), ((2746, 2765), 'numpy.arange', 'np.arange', (['num_bins'], {}), '(num_bins)\n', (2755, 2765), True, 'import numpy as np\n'), ((3631, 3646), 'os.remove', 'remove', (['h5_path'], {}), '(h5_path)\n', (3637, 3646), False, 'from os import path, remove\n'), ((3927, 3957), 'pyUSID.io.write_utils.Dimension', 'Dimension', (['"""X"""', '"""nm"""', 'num_rows'], {}), "('X', 'nm', num_rows)\n", (3936, 3957), False, 'from pyUSID.io.write_utils import VALUES_DTYPE, Dimension\n'), ((3979, 4009), 'pyUSID.io.write_utils.Dimension', 'Dimension', (['"""Y"""', '"""nm"""', 'num_cols'], {}), "('Y', 'nm', num_cols)\n", (3988, 4009), False, 'from pyUSID.io.write_utils 
import VALUES_DTYPE, Dimension\n'), ((4032, 4075), 'pyUSID.io.write_utils.Dimension', 'Dimension', (['"""Response Bin"""', '"""a.u."""', 'num_bins'], {}), "('Response Bin', 'a.u.', num_bins)\n", (4041, 4075), False, 'from pyUSID.io.write_utils import VALUES_DTYPE, Dimension\n'), ((5269, 5291), 'os.path.exists', 'path.exists', (['file_path'], {}), '(file_path)\n', (5280, 5291), False, 'from os import path, remove\n'), ((5361, 5396), 'scipy.io.matlab.loadmat', 'loadmat', (['file_path'], {'squeeze_me': '(True)'}), '(file_path, squeeze_me=True)\n', (5368, 5396), False, 'from scipy.io.matlab import loadmat\n'), ((5778, 5797), 'numpy.float32', 'np.float32', (['pix_vec'], {}), '(pix_vec)\n', (5788, 5797), True, 'import numpy as np\n'), ((5548, 5581), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['pix_mat'], {'axes': '(1)'}), '(pix_mat, axes=1)\n', (5564, 5581), True, 'import numpy as np\n')] |
import functools
import io
import warnings
from operator import attrgetter
import numpy as np
import pytest
import torch
from torch import nn
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer import SVI, Trace_ELBO, TraceEnum_ELBO, TraceGraph_ELBO, Predictive
from pyro.infer.autoguide import (AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide,
AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal,
AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median,
init_to_sample)
from pyro.nn.module import PyroModule, PyroParam, PyroSample
from pyro.optim import Adam
from pyro.poutine.util import prune_subsample_sites
from pyro.util import check_model_guide_match
from tests.common import assert_close, assert_equal
@pytest.mark.parametrize("auto_class", [
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoIAFNormal,
])
def test_scores(auto_class):
    """Check that the guide's packed latent carries log-prob mass while the
    deterministically-unpacked model sites contribute zero in the guide trace."""
    def model():
        if auto_class is AutoIAFNormal:
            # IAF needs a latent of dimension > 1.
            pyro.sample("z", dist.Normal(0.0, 1.0).expand([10]))
        else:
            pyro.sample("z", dist.Normal(0.0, 1.0))

    guide = auto_class(model)
    guide_trace = poutine.trace(guide).get_trace()
    model_trace = poutine.trace(poutine.replay(model, guide_trace)).get_trace()

    guide_trace.compute_log_prob()
    model_trace.compute_log_prob()

    # The packed latent site is named "_<ClassName>_latent".
    prefix = auto_class.__name__
    assert '_{}_latent'.format(prefix) not in model_trace.nodes
    assert model_trace.nodes['z']['log_prob_sum'].item() != 0.0
    assert guide_trace.nodes['_{}_latent'.format(prefix)]['log_prob_sum'].item() != 0.0
    assert guide_trace.nodes['z']['log_prob_sum'].item() == 0.0
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoIAFNormal,
    AutoLaplaceApproximation,
])
def test_factor(auto_class, Elbo):
    """Verify pyro.factor contributes linearly to the ELBO: one factor outside
    and one inside a size-3 plate give a loss difference of (1 + 3) per unit."""
    def model(log_factor):
        pyro.sample("z1", dist.Normal(0.0, 1.0))
        pyro.factor("f1", log_factor)
        pyro.sample("z2", dist.Normal(torch.zeros(2), torch.ones(2)).to_event(1))
        with pyro.plate("plate", 3):
            pyro.factor("f2", log_factor)
            pyro.sample("z3", dist.Normal(torch.zeros(3), torch.ones(3)))

    guide = auto_class(model)
    elbo = Elbo(strict_enumeration_warning=False)
    elbo.loss(model, guide, torch.tensor(0.))  # initialize param store
    # Same RNG seed for both evaluations so only the factor value differs.
    pyro.set_rng_seed(123)
    loss_5 = elbo.loss(model, guide, torch.tensor(5.))
    pyro.set_rng_seed(123)
    loss_4 = elbo.loss(model, guide, torch.tensor(4.))
    assert_close(loss_5 - loss_4, -1 - 3)
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
@pytest.mark.parametrize("init_loc_fn", [
    init_to_feasible,
    init_to_mean,
    init_to_median,
    init_to_sample,
])
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoIAFNormal,
    AutoLaplaceApproximation,
])
def test_shapes(auto_class, init_loc_fn, Elbo):
    """Smoke test: autoguides handle scalar, event-shaped, plated,
    multivariate, simplex-constrained and masked sites with a finite loss."""
    def model():
        pyro.sample("z1", dist.Normal(0.0, 1.0))
        pyro.sample("z2", dist.Normal(torch.zeros(2), torch.ones(2)).to_event(1))
        with pyro.plate("plate", 3):
            pyro.sample("z3", dist.Normal(torch.zeros(3), torch.ones(3)))
        pyro.sample("z4", dist.MultivariateNormal(torch.zeros(2), torch.eye(2)))
        pyro.sample("z5", dist.Dirichlet(torch.ones(3)))
        # Partially-masked site: only the second element contributes.
        pyro.sample("z6", dist.Normal(0, 1).expand((2,)).mask(torch.arange(2) > 0).to_event(1))

    guide = auto_class(model, init_loc_fn=init_loc_fn)
    elbo = Elbo(strict_enumeration_warning=False)
    loss = elbo.loss(model, guide)
    assert np.isfinite(loss), loss
@pytest.mark.xfail(reason="sequential plate is not yet supported")
@pytest.mark.parametrize('auto_class', [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoIAFNormal,
    AutoLaplaceApproximation,
])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO])
def test_iplate_smoke(auto_class, Elbo):
    """Expected-failure smoke test: autoguides over a model that uses a
    sequential plate (for i in pyro.plate(...)), which is unsupported."""
    def model():
        x = pyro.sample("x", dist.Normal(0, 1))
        assert x.shape == ()

        for i in pyro.plate("plate", 3):
            y = pyro.sample("y_{}".format(i), dist.Normal(0, 1).expand_by([2, 1 + i, 2]).to_event(3))
            assert y.shape == (2, 1 + i, 2)

        z = pyro.sample("z", dist.Normal(0, 1).expand_by([2]).to_event(1))
        assert z.shape == (2,)

        pyro.sample("obs", dist.Bernoulli(0.1), obs=torch.tensor(0))

    guide = auto_class(model)
    infer = SVI(model, guide, Adam({"lr": 1e-6}), Elbo(strict_enumeration_warning=False))
    infer.step()
def auto_guide_list_x(model):
    """Build an AutoGuideList that handles site "x" with AutoDelta and all
    remaining sites with AutoDiagonalNormal."""
    guide = AutoGuideList(model)
    guide.append(AutoDelta(poutine.block(model, expose=["x"])))
    guide.append(AutoDiagonalNormal(poutine.block(model, hide=["x"])))
    return guide
def auto_guide_callable(model):
    """Build an AutoGuideList that handles site "x" with a hand-written
    callable guide (wrapped in AutoCallable, with a median function) and all
    remaining sites with AutoDiagonalNormal."""
    def guide_x():
        x_loc = pyro.param("x_loc", torch.tensor(1.))
        x_scale = pyro.param("x_scale", torch.tensor(.1), constraint=constraints.positive)
        pyro.sample("x", dist.Normal(x_loc, x_scale))

    def median_x():
        # Median of Normal(x_loc, x_scale) is x_loc itself.
        return {"x": pyro.param("x_loc", torch.tensor(1.))}

    guide = AutoGuideList(model)
    guide.append(AutoCallable(model, guide_x, median_x))
    guide.append(AutoDiagonalNormal(poutine.block(model, hide=["x"])))
    return guide
def auto_guide_module_callable(model):
    """Build an AutoGuideList that handles site "x" with a custom AutoGuide
    subclass (an nn.Module with its own median()) and all remaining sites
    with AutoDiagonalNormal, both attached as named sub-guides."""
    class GuideX(AutoGuide):
        def __init__(self, model):
            super().__init__(model)
            self.x_loc = nn.Parameter(torch.tensor(1.))
            self.x_scale = PyroParam(torch.tensor(.1), constraint=constraints.positive)

        def forward(self, *args, **kwargs):
            return {"x": pyro.sample("x", dist.Normal(self.x_loc, self.x_scale))}

        def median(self, *args, **kwargs):
            # Median of Normal(x_loc, x_scale) is x_loc itself.
            return {"x": self.x_loc.detach()}

    guide = AutoGuideList(model)
    guide.custom = GuideX(model)
    guide.diagnorm = AutoDiagonalNormal(poutine.block(model, hide=["x"]))
    return guide
def nested_auto_guide_callable(model):
    """Build an AutoGuideList containing another AutoGuideList, exercising
    nested guide composition (AutoDelta for "x", nested AutoIAFNormal for "y")."""
    guide = AutoGuideList(model)
    guide.append(AutoDelta(poutine.block(model, expose=['x'])))
    guide_y = AutoGuideList(poutine.block(model, expose=['y']))
    guide_y.z = AutoIAFNormal(poutine.block(model, expose=['y']))
    guide.append(guide_y)
    return guide
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoLaplaceApproximation,
    auto_guide_list_x,
    auto_guide_callable,
    auto_guide_module_callable,
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_feasible),
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_mean),
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_median),
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_sample),
])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_median(auto_class, Elbo):
    """After short SVI training, guide.median() should approximate each
    prior's median: 0 for Normal(0,1), 1 for LogNormal(0,1), 0.5 for Beta(2,2)."""
    def model():
        pyro.sample("x", dist.Normal(0.0, 1.0))
        pyro.sample("y", dist.LogNormal(0.0, 1.0))
        pyro.sample("z", dist.Beta(2.0, 2.0))

    guide = auto_class(model)
    optim = Adam({'lr': 0.05, 'betas': (0.8, 0.99)})
    elbo = Elbo(strict_enumeration_warning=False,
                num_particles=100, vectorize_particles=True)
    infer = SVI(model, guide, optim, elbo)
    for _ in range(100):
        infer.step()

    if auto_class is AutoLaplaceApproximation:
        guide = guide.laplace_approximation()

    median = guide.median()
    assert_equal(median["x"], torch.tensor(0.0), prec=0.1)
    if auto_class is AutoDelta:
        # AutoDelta fits the MAP point, which for LogNormal(0,1) is exp(-1).
        assert_equal(median["y"], torch.tensor(-1.0).exp(), prec=0.1)
    else:
        assert_equal(median["y"], torch.tensor(1.0), prec=0.1)
    assert_equal(median["z"], torch.tensor(0.5), prec=0.1)
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoLaplaceApproximation,
    auto_guide_list_x,
    auto_guide_module_callable,
    nested_auto_guide_callable,
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_feasible),
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_mean),
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_median),
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_sample),
])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_autoguide_serialization(auto_class, Elbo):
    """Round-trip an autoguide through torch.jit trace/save/load and check
    that .call() outputs (under the same RNG seed) and parameter names match."""
    def model():
        pyro.sample("x", dist.Normal(0.0, 1.0))
        with pyro.plate("plate", 2):
            pyro.sample("y", dist.LogNormal(0.0, 1.0))
            pyro.sample("z", dist.Beta(2.0, 2.0))
    guide = auto_class(model)
    guide()
    if auto_class is AutoLaplaceApproximation:
        guide = guide.laplace_approximation()

    pyro.set_rng_seed(0)
    expected = guide.call()
    names = sorted(guide())

    # Ignore tracer warnings
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
        # XXX: check_trace=True fails for AutoLaplaceApproximation
        traced_guide = torch.jit.trace_module(guide, {"call": ()}, check_trace=False)
    # Serialize to an in-memory buffer and reload.
    f = io.BytesIO()
    torch.jit.save(traced_guide, f)
    f.seek(0)
    guide_deser = torch.jit.load(f)

    # Check .call() result.
    pyro.set_rng_seed(0)
    actual = guide_deser.call()
    assert len(actual) == len(expected)
    for name, a, e in zip(names, actual, expected):
        assert_equal(a, e, msg="{}: {} vs {}".format(name, a, e))

    # Check named_parameters.
    expected_names = {name for name, _ in guide.named_parameters()}
    actual_names = {name for name, _ in guide_deser.named_parameters()}
    assert actual_names == expected_names
    for name in actual_names:
        # Get nested attributes.
        attr_get = attrgetter(name)
        assert_equal(attr_get(guide_deser), attr_get(guide).data)
@pytest.mark.parametrize("auto_class", [
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoLaplaceApproximation,
])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_quantiles(auto_class, Elbo):
    """After short SVI training, guide.quantiles([0.1, 0.5, 0.9]) should be
    strictly ordered, agree with guide.median() at 0.5, and fall in loose
    per-site ranges for Normal, LogNormal and Beta priors."""
    def model():
        pyro.sample("x", dist.Normal(0.0, 1.0))
        pyro.sample("y", dist.LogNormal(0.0, 1.0))
        pyro.sample("z", dist.Beta(2.0, 2.0))

    guide = auto_class(model)
    optim = Adam({'lr': 0.05, 'betas': (0.8, 0.99)})
    elbo = Elbo(strict_enumeration_warning=False,
                num_particles=100, vectorize_particles=True)
    infer = SVI(model, guide, optim, elbo)
    for _ in range(100):
        infer.step()

    if auto_class is AutoLaplaceApproximation:
        guide = guide.laplace_approximation()

    quantiles = guide.quantiles([0.1, 0.5, 0.9])
    median = guide.median()
    for name in ["x", "y", "z"]:
        # The 0.5-quantile must coincide with the reported median.
        assert_equal(median[name], quantiles[name][1])
    quantiles = {name: [v.item() for v in value] for name, value in quantiles.items()}

    assert -3.0 < quantiles["x"][0]
    assert quantiles["x"][0] + 1.0 < quantiles["x"][1]
    assert quantiles["x"][1] + 1.0 < quantiles["x"][2]
    assert quantiles["x"][2] < 3.0

    assert 0.01 < quantiles["y"][0]
    assert quantiles["y"][0] * 2.0 < quantiles["y"][1]
    assert quantiles["y"][1] * 2.0 < quantiles["y"][2]
    assert quantiles["y"][2] < 100.0

    assert 0.01 < quantiles["z"][0]
    assert quantiles["z"][0] + 0.1 < quantiles["z"][1]
    assert quantiles["z"][1] + 0.1 < quantiles["z"][2]
    assert quantiles["z"][2] < 0.99
@pytest.mark.parametrize("continuous_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoIAFNormal,
    AutoLaplaceApproximation,
])
def test_discrete_parallel(continuous_class):
    """Mixture-model smoke test: a continuous autoguide for the global sites
    combined with AutoDiscreteParallel for the enumerated assignments should
    yield a finite TraceEnum_ELBO loss."""
    K = 2
    data = torch.tensor([0., 1., 10., 11., 12.])

    def model(data):
        weights = pyro.sample('weights', dist.Dirichlet(0.5 * torch.ones(K)))
        locs = pyro.sample('locs', dist.Normal(0, 10).expand_by([K]).to_event(1))
        scale = pyro.sample('scale', dist.LogNormal(0, 1))

        with pyro.plate('data', len(data)):
            weights = weights.expand(torch.Size((len(data),)) + weights.shape)
            assignment = pyro.sample('assignment', dist.Categorical(weights))
            pyro.sample('obs', dist.Normal(locs[assignment], scale), obs=data)

    guide = AutoGuideList(model)
    guide.append(continuous_class(poutine.block(model, hide=["assignment"])))
    guide.append(AutoDiscreteParallel(poutine.block(model, expose=["assignment"])))

    elbo = TraceEnum_ELBO(max_plate_nesting=1)
    loss = elbo.loss_and_grads(model, guide, data)
    assert np.isfinite(loss), loss
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoIAFNormal,
    AutoLaplaceApproximation,
])
def test_guide_list(auto_class):
    """Smoke test: an AutoGuideList of two same-class sub-guides, each
    covering a disjoint subset of sites, can be constructed and called."""
    def model():
        pyro.sample("x", dist.Normal(0., 1.).expand([2]))
        pyro.sample("y", dist.MultivariateNormal(torch.zeros(5), torch.eye(5, 5)))

    guide = AutoGuideList(model)
    guide.append(auto_class(poutine.block(model, expose=["x"])))
    guide.append(auto_class(poutine.block(model, expose=["y"])))
    guide()
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoLaplaceApproximation,
])
def test_callable(auto_class):
    """A plain callable appended to an AutoGuideList returns None, so only
    the autoguide's sites ("y") appear in the values returned by guide()."""
    def model():
        pyro.sample("x", dist.Normal(0., 1.))
        pyro.sample("y", dist.MultivariateNormal(torch.zeros(5), torch.eye(5, 5)))

    def guide_x():
        x_loc = pyro.param("x_loc", torch.tensor(0.))
        pyro.sample("x", dist.Delta(x_loc))

    guide = AutoGuideList(model)
    guide.append(guide_x)
    guide.append(auto_class(poutine.block(model, expose=["y"])))
    values = guide()
    assert set(values) == set(["y"])
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoLaplaceApproximation,
])
def test_callable_return_dict(auto_class):
    """A callable that returns a {site: value} dict contributes its sites to
    the values returned by guide(), so both "x" and "y" appear."""
    def model():
        pyro.sample("x", dist.Normal(0., 1.))
        pyro.sample("y", dist.MultivariateNormal(torch.zeros(5), torch.eye(5, 5)))

    def guide_x():
        x_loc = pyro.param("x_loc", torch.tensor(0.))
        x = pyro.sample("x", dist.Delta(x_loc))
        return {"x": x}

    guide = AutoGuideList(model)
    guide.append(guide_x)
    guide.append(auto_class(poutine.block(model, expose=["y"])))
    values = guide()
    assert set(values) == set(["x", "y"])
def test_empty_model_error():
    """Calling an autoguide built on a latent-free model raises RuntimeError."""
    def no_latents():
        pass

    empty_guide = AutoDiagonalNormal(no_latents)
    with pytest.raises(RuntimeError):
        empty_guide()
def test_unpack_latent():
    """Unpacking the guide's flat latent recovers the constrained site shape."""
    def model():
        return pyro.sample('x', dist.LKJCorrCholesky(2, torch.tensor(1.)))

    guide = AutoDiagonalNormal(model)
    # The guide's draw for 'x' matches the model's constrained shape.
    assert guide()['x'].shape == model().shape
    flat = guide.sample_latent()
    unpacked = list(guide._unpack_latent(flat))
    assert unpacked[0][1].shape == (1,)
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
])
def test_init_loc_fn(auto_class):
    """A custom init_loc_fn should fix the guide's initial (hence median) values."""
    def model():
        pyro.sample("x", dist.Normal(0., 1.))
        pyro.sample("y", dist.MultivariateNormal(torch.zeros(5), torch.eye(5, 5)))

    initial_values = {"x": torch.randn(()), "y": torch.randn(5)}

    def init_loc_fn(site):
        return initial_values[site["name"]]

    guide = auto_class(model, init_loc_fn=init_loc_fn)
    guide()
    med = guide.median()
    # Before any optimization the median sits exactly at the init values.
    for name in ("x", "y"):
        assert_equal(med[name], initial_values[name])
# testing helper
class AutoLowRankMultivariateNormal_100(AutoLowRankMultivariateNormal):
    """AutoLowRankMultivariateNormal with the covariance rank pinned to 100.

    Lets the rank-100 variant participate in ``auto_class`` parametrizations
    that construct guides via ``auto_class(model, ...)``.
    """

    def __init__(self, *args, **kwargs):
        # __init__ must not return a value; the previous `return` of the
        # superclass call (always None) was misleading, so just delegate.
        super().__init__(*args, **kwargs, rank=100)
@pytest.mark.parametrize("init_scale", [1e-1, 1e-4, 1e-8])
@pytest.mark.parametrize("auto_class", [
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoLowRankMultivariateNormal_100,
])
def test_init_scale(auto_class, init_scale):
    """The guide's initial posterior scale should track the requested init_scale."""
    def model():
        pyro.sample("x", dist.Normal(0., 1.))
        pyro.sample("y", dist.MultivariateNormal(torch.zeros(5), torch.eye(5, 5)))
        with pyro.plate("plate", 100):
            pyro.sample("z", dist.Normal(0., 1.))

    guide = auto_class(model, init_scale=init_scale)
    guide()
    _, scale = guide._loc_scale()
    # Root-mean-square of the per-dimension scales must land near init_scale.
    rms = scale.pow(2).mean().sqrt().item()
    assert 0.5 * init_scale < rms < 2.0 * init_scale
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoLaplaceApproximation,
    auto_guide_list_x,
    auto_guide_callable,
    auto_guide_module_callable,
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_mean),
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_median),
])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_median_module(auto_class, Elbo):
    """After a few SVI steps the guide median should approach the model's modes."""
    class Model(PyroModule):
        def __init__(self):
            super().__init__()
            self.x_loc = nn.Parameter(torch.tensor(1.))
            self.x_scale = PyroParam(torch.tensor(0.1), constraints.positive)

        def forward(self):
            pyro.sample("x", dist.Normal(self.x_loc, self.x_scale))
            pyro.sample("y", dist.Normal(2., 0.1))

    model = Model()
    guide = auto_class(model)
    svi = SVI(model, guide, Adam({'lr': 0.005}), Elbo(strict_enumeration_warning=False))
    for _step in range(20):
        svi.step()
    # The Laplace guide only becomes usable after fitting its approximation.
    if auto_class is AutoLaplaceApproximation:
        guide = guide.laplace_approximation()
    med = guide.median()
    assert_equal(med["x"].detach(), torch.tensor(1.0), prec=0.1)
    assert_equal(med["y"].detach(), torch.tensor(2.0), prec=0.1)
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_nested_autoguide(Elbo):
    """Nested AutoGuideList components keep a master ref and trace consistently.

    Builds a model with a scalar site "x" and a plated site "y", wraps it in
    the nested guide from ``nested_auto_guide_callable`` (defined elsewhere in
    this file), runs 20 SVI steps, and checks parameter naming and the sampled
    sites of the resulting guide trace.
    """
    class Model(PyroModule):
        def __init__(self):
            super().__init__()
            self.x_loc = nn.Parameter(torch.tensor(1.))
            self.x_scale = PyroParam(torch.tensor(0.1), constraints.positive)
        def forward(self):
            pyro.sample("x", dist.Normal(self.x_loc, self.x_scale))
            with pyro.plate("plate", 2):
                pyro.sample("y", dist.Normal(2., 0.1))
    model = Model()
    guide = nested_auto_guide_callable(model)
    # Check master ref for all nested components: every sub-module's weakref
    # `master` must resolve to the top-level guide object.
    for _, m in guide.named_modules():
        if m is guide:
            continue
        assert m.master is not None and m.master() is guide, "master ref wrong for {}".format(m._pyro_name)
    infer = SVI(model, guide, Adam({'lr': 0.005}), Elbo(strict_enumeration_warning=False))
    for _ in range(20):
        infer.step()
    # Traces of model and guide must agree on sites and shapes.
    guide_trace = poutine.trace(guide).get_trace()
    model_trace = poutine.trace(model).get_trace()
    check_model_guide_match(model_trace, guide_trace)
    # All guide params are namespaced under the two AutoGuideList slots.
    assert all(p.startswith("AutoGuideList.0") or p.startswith("AutoGuideList.1.z")
               for p in guide_trace.param_nodes)
    stochastic_nodes = set(guide_trace.stochastic_nodes)
    assert "x" in stochastic_nodes
    assert "y" in stochastic_nodes
    # Only latent sampled is for the IAF.
    assert "_AutoGuideList.1.z_latent" in stochastic_nodes
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoLaplaceApproximation,
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_mean),
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_median),
])
@pytest.mark.parametrize("Elbo", [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])
def test_linear_regression_smoke(auto_class, Elbo):
    """One SVI step on a Bayesian linear regression runs without error."""
    N, D = 10, 3

    class RandomLinear(nn.Linear, PyroModule):
        """nn.Linear whose weight and bias are Pyro latent sites."""
        def __init__(self, in_features, out_features):
            super().__init__(in_features, out_features)
            self.weight = PyroSample(dist.Normal(0., 1.).expand([out_features, in_features]).to_event(2))
            self.bias = PyroSample(dist.Normal(0., 10.).expand([out_features]).to_event(1))

    class LinearRegression(PyroModule):
        """Regression model: y ~ Normal(linear(x), sigma)."""
        def __init__(self):
            super().__init__()
            self.linear = RandomLinear(D, 1)

        def forward(self, x, y=None):
            mean = self.linear(x).squeeze(-1)
            sigma = pyro.sample("sigma", dist.LogNormal(0., 1.))
            with pyro.plate('plate', N):
                return pyro.sample('obs', dist.Normal(mean, sigma), obs=y)

    inputs, targets = torch.randn(N, D), torch.randn(N)
    model = LinearRegression()
    guide = auto_class(model)
    svi = SVI(model, guide, Adam({'lr': 0.005}), Elbo(strict_enumeration_warning=False))
    svi.step(inputs, targets)
@pytest.mark.parametrize("auto_class", [
    AutoDelta,
    AutoDiagonalNormal,
    AutoMultivariateNormal,
    AutoLowRankMultivariateNormal,
    AutoLaplaceApproximation,
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_mean),
    functools.partial(AutoDiagonalNormal, init_loc_fn=init_to_median),
])
def test_predictive(auto_class):
    """Predictive over an autoguide covers all model sites and survives JIT round-trip.

    Builds a Bayesian linear regression, wraps it in ``Predictive`` with the
    given autoguide, and checks (a) posterior-predictive samples exist for
    every stochastic site of the model trace, and (b) the Predictive module
    can be traced with torch.jit, serialized, reloaded, and called again.
    """
    N, D = 3, 2
    class RandomLinear(nn.Linear, PyroModule):
        # nn.Linear whose weight and bias are replaced by Pyro latent sites.
        def __init__(self, in_features, out_features):
            super().__init__(in_features, out_features)
            self.weight = PyroSample(dist.Normal(0., 1.).expand([out_features, in_features]).to_event(2))
            self.bias = PyroSample(dist.Normal(0., 10.).expand([out_features]).to_event(1))
    class LinearRegression(PyroModule):
        # Regression model: obs ~ Normal(linear(x), sigma), plated over N.
        def __init__(self):
            super().__init__()
            self.linear = RandomLinear(D, 1)
        def forward(self, x, y=None):
            mean = self.linear(x).squeeze(-1)
            sigma = pyro.sample("sigma", dist.LogNormal(0., 1.))
            with pyro.plate('plate', N):
                return pyro.sample('obs', dist.Normal(mean, sigma), obs=y)
    x, y = torch.randn(N, D), torch.randn(N)
    model = LinearRegression()
    guide = auto_class(model)
    # XXX: Record `y` as observed in the prototype trace
    # Is there a better pattern to follow?
    guide(x, y=y)
    # Test predictive module
    model_trace = poutine.trace(model).get_trace(x, y=None)
    predictive = Predictive(model, guide=guide, num_samples=10)
    # Seed before sampling so the later deserialized run is comparable in spirit.
    pyro.set_rng_seed(0)
    samples = predictive(x)
    # Every stochastic site of the (subsample-pruned) model trace must be sampled.
    for site in prune_subsample_sites(model_trace).stochastic_nodes:
        assert site in samples
    # JIT-trace the Predictive module; tracer warnings are expected and ignored.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
        traced_predictive = torch.jit.trace_module(predictive, {"call": (x,)})
    # Round-trip through an in-memory buffer to exercise save/load.
    f = io.BytesIO()
    torch.jit.save(traced_predictive, f)
    f.seek(0)
    predictive_deser = torch.jit.load(f)
    pyro.set_rng_seed(0)
    samples_deser = predictive_deser.call(x)
    # Note that the site values are different in the serialized guide
    assert len(samples) == len(samples_deser)
| [
"torch.eye",
"pyro.plate",
"pyro.distributions.Beta",
"torch.jit.trace_module",
"torch.randn",
"pyro.infer.autoguide.AutoGuideList",
"pyro.infer.Predictive",
"pyro.factor",
"torch.arange",
"pytest.mark.parametrize",
"pyro.poutine.block",
"pyro.poutine.trace",
"torch.ones",
"pyro.distributi... | [((981, 1114), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDiagonalNormal, AutoMultivariateNormal, AutoLowRankMultivariateNormal,\n AutoIAFNormal]'], {}), "('auto_class', [AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal, AutoIAFNormal])\n", (1004, 1114), False, 'import pytest\n'), ((1897, 1975), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Elbo"""', '[Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO]'], {}), "('Elbo', [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])\n", (1920, 1975), False, 'import pytest\n'), ((1977, 2151), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal,\n AutoLowRankMultivariateNormal, AutoIAFNormal, AutoLaplaceApproximation]'], {}), "('auto_class', [AutoDelta, AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal, AutoIAFNormal,\n AutoLaplaceApproximation])\n", (2000, 2151), False, 'import pytest\n'), ((2919, 2997), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Elbo"""', '[Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO]'], {}), "('Elbo', [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])\n", (2942, 2997), False, 'import pytest\n'), ((2999, 3107), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init_loc_fn"""', '[init_to_feasible, init_to_mean, init_to_median, init_to_sample]'], {}), "('init_loc_fn', [init_to_feasible, init_to_mean,\n init_to_median, init_to_sample])\n", (3022, 3107), False, 'import pytest\n'), ((3124, 3298), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal,\n AutoLowRankMultivariateNormal, AutoIAFNormal, AutoLaplaceApproximation]'], {}), "('auto_class', [AutoDelta, AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal, AutoIAFNormal,\n AutoLaplaceApproximation])\n", (3147, 3298), False, 'import pytest\n'), 
((4039, 4104), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""sequential plate is not yet supported"""'}), "(reason='sequential plate is not yet supported')\n", (4056, 4104), False, 'import pytest\n'), ((4106, 4280), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal,\n AutoLowRankMultivariateNormal, AutoIAFNormal, AutoLaplaceApproximation]'], {}), "('auto_class', [AutoDelta, AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal, AutoIAFNormal,\n AutoLaplaceApproximation])\n", (4129, 4280), False, 'import pytest\n'), ((4301, 4363), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Elbo"""', '[Trace_ELBO, TraceGraph_ELBO]'], {}), "('Elbo', [Trace_ELBO, TraceGraph_ELBO])\n", (4324, 4363), False, 'import pytest\n'), ((7246, 7324), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Elbo"""', '[Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO]'], {}), "('Elbo', [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])\n", (7269, 7324), False, 'import pytest\n'), ((8773, 8851), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Elbo"""', '[Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO]'], {}), "('Elbo', [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])\n", (8796, 8851), False, 'import pytest\n'), ((10354, 10502), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDiagonalNormal, AutoMultivariateNormal, AutoLowRankMultivariateNormal,\n AutoLaplaceApproximation]'], {}), "('auto_class', [AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal,\n AutoLaplaceApproximation])\n", (10377, 10502), False, 'import pytest\n'), ((10515, 10593), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Elbo"""', '[Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO]'], {}), "('Elbo', [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])\n", (10538, 10593), False, 'import pytest\n'), ((11978, 12158), 
'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""continuous_class"""', '[AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal,\n AutoLowRankMultivariateNormal, AutoIAFNormal, AutoLaplaceApproximation]'], {}), "('continuous_class', [AutoDelta, AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal, AutoIAFNormal,\n AutoLaplaceApproximation])\n", (12001, 12158), False, 'import pytest\n'), ((13138, 13312), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal,\n AutoLowRankMultivariateNormal, AutoIAFNormal, AutoLaplaceApproximation]'], {}), "('auto_class', [AutoDelta, AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal, AutoIAFNormal,\n AutoLaplaceApproximation])\n", (13161, 13312), False, 'import pytest\n'), ((13703, 13862), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal,\n AutoLowRankMultivariateNormal, AutoLaplaceApproximation]'], {}), "('auto_class', [AutoDelta, AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal,\n AutoLaplaceApproximation])\n", (13726, 13862), False, 'import pytest\n'), ((14360, 14519), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal,\n AutoLowRankMultivariateNormal, AutoLaplaceApproximation]'], {}), "('auto_class', [AutoDelta, AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal,\n AutoLaplaceApproximation])\n", (14383, 14519), False, 'import pytest\n'), ((15523, 15652), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDelta, AutoDiagonalNormal, AutoMultivariateNormal,\n AutoLowRankMultivariateNormal]'], {}), "('auto_class', [AutoDelta, AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal])\n", (15546, 15652), False, 'import pytest\n'), ((16343, 
16402), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""init_scale"""', '[0.1, 0.0001, 1e-08]'], {}), "('init_scale', [0.1, 0.0001, 1e-08])\n", (16366, 16402), False, 'import pytest\n'), ((16402, 16559), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""auto_class"""', '[AutoDiagonalNormal, AutoMultivariateNormal, AutoLowRankMultivariateNormal,\n AutoLowRankMultivariateNormal_100]'], {}), "('auto_class', [AutoDiagonalNormal,\n AutoMultivariateNormal, AutoLowRankMultivariateNormal,\n AutoLowRankMultivariateNormal_100])\n", (16425, 16559), False, 'import pytest\n'), ((17462, 17540), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Elbo"""', '[Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO]'], {}), "('Elbo', [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])\n", (17485, 17540), False, 'import pytest\n'), ((18402, 18480), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Elbo"""', '[Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO]'], {}), "('Elbo', [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])\n", (18425, 18480), False, 'import pytest\n'), ((20212, 20290), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Elbo"""', '[Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO]'], {}), "('Elbo', [Trace_ELBO, TraceGraph_ELBO, TraceEnum_ELBO])\n", (20235, 20290), False, 'import pytest\n'), ((2714, 2736), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['(123)'], {}), '(123)\n', (2731, 2736), False, 'import pyro\n'), ((2796, 2818), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['(123)'], {}), '(123)\n', (2813, 2818), False, 'import pyro\n'), ((2878, 2915), 'tests.common.assert_close', 'assert_close', (['(loss_5 - loss_4)', '(-1 - 3)'], {}), '(loss_5 - loss_4, -1 - 3)\n', (2890, 2915), False, 'from tests.common import assert_close, assert_equal\n'), ((4012, 4029), 'numpy.isfinite', 'np.isfinite', (['loss'], {}), '(loss)\n', (4023, 4029), True, 'import numpy as np\n'), ((5047, 5067), 'pyro.infer.autoguide.AutoGuideList', 'AutoGuideList', (['model'], {}), 
'(model)\n', (5060, 5067), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((5566, 5586), 'pyro.infer.autoguide.AutoGuideList', 'AutoGuideList', (['model'], {}), '(model)\n', (5579, 5586), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((6247, 6267), 'pyro.infer.autoguide.AutoGuideList', 'AutoGuideList', (['model'], {}), '(model)\n', (6260, 6267), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((6445, 6465), 'pyro.infer.autoguide.AutoGuideList', 'AutoGuideList', (['model'], {}), '(model)\n', (6458, 6465), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((7566, 7606), 'pyro.optim.Adam', 'Adam', (["{'lr': 0.05, 'betas': (0.8, 0.99)}"], {}), "({'lr': 0.05, 'betas': (0.8, 0.99)})\n", (7570, 7606), False, 'from pyro.optim import Adam\n'), ((7730, 7760), 'pyro.infer.SVI', 'SVI', (['model', 'guide', 'optim', 'elbo'], {}), '(model, guide, optim, elbo)\n', (7733, 7760), False, 'from pyro.infer import SVI, Trace_ELBO, TraceEnum_ELBO, TraceGraph_ELBO, Predictive\n'), ((9250, 9270), 
'pyro.set_rng_seed', 'pyro.set_rng_seed', (['(0)'], {}), '(0)\n', (9267, 9270), False, 'import pyro\n'), ((9630, 9642), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (9640, 9642), False, 'import io\n'), ((9647, 9678), 'torch.jit.save', 'torch.jit.save', (['traced_guide', 'f'], {}), '(traced_guide, f)\n', (9661, 9678), False, 'import torch\n'), ((9711, 9728), 'torch.jit.load', 'torch.jit.load', (['f'], {}), '(f)\n', (9725, 9728), False, 'import torch\n'), ((9762, 9782), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['(0)'], {}), '(0)\n', (9779, 9782), False, 'import pyro\n'), ((10838, 10878), 'pyro.optim.Adam', 'Adam', (["{'lr': 0.05, 'betas': (0.8, 0.99)}"], {}), "({'lr': 0.05, 'betas': (0.8, 0.99)})\n", (10842, 10878), False, 'from pyro.optim import Adam\n'), ((11002, 11032), 'pyro.infer.SVI', 'SVI', (['model', 'guide', 'optim', 'elbo'], {}), '(model, guide, optim, elbo)\n', (11005, 11032), False, 'from pyro.infer import SVI, Trace_ELBO, TraceEnum_ELBO, TraceGraph_ELBO, Predictive\n'), ((12245, 12287), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0, 10.0, 11.0, 12.0]'], {}), '([0.0, 1.0, 10.0, 11.0, 12.0])\n', (12257, 12287), False, 'import torch\n'), ((12818, 12838), 'pyro.infer.autoguide.AutoGuideList', 'AutoGuideList', (['model'], {}), '(model)\n', (12831, 12838), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((13013, 13048), 'pyro.infer.TraceEnum_ELBO', 'TraceEnum_ELBO', ([], {'max_plate_nesting': '(1)'}), '(max_plate_nesting=1)\n', (13027, 13048), False, 'from pyro.infer import SVI, Trace_ELBO, TraceEnum_ELBO, TraceGraph_ELBO, Predictive\n'), ((13111, 13128), 'numpy.isfinite', 'np.isfinite', (['loss'], {}), '(loss)\n', (13122, 13128), True, 'import numpy as np\n'), ((13537, 13557), 
'pyro.infer.autoguide.AutoGuideList', 'AutoGuideList', (['model'], {}), '(model)\n', (13550, 13557), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((14187, 14207), 'pyro.infer.autoguide.AutoGuideList', 'AutoGuideList', (['model'], {}), '(model)\n', (14200, 14207), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((14884, 14904), 'pyro.infer.autoguide.AutoGuideList', 'AutoGuideList', (['model'], {}), '(model)\n', (14897, 14904), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((15133, 15158), 'pyro.infer.autoguide.AutoDiagonalNormal', 'AutoDiagonalNormal', (['model'], {}), '(model)\n', (15151, 15158), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((15346, 15371), 'pyro.infer.autoguide.AutoDiagonalNormal', 'AutoDiagonalNormal', (['model'], {}), '(model)\n', (15364, 15371), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, 
AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((16069, 16106), 'tests.common.assert_equal', 'assert_equal', (["median['x']", "inits['x']"], {}), "(median['x'], inits['x'])\n", (16081, 16106), False, 'from tests.common import assert_close, assert_equal\n'), ((16111, 16148), 'tests.common.assert_equal', 'assert_equal', (["median['y']", "inits['y']"], {}), "(median['y'], inits['y'])\n", (16123, 16148), False, 'from tests.common import assert_close, assert_equal\n'), ((19482, 19531), 'pyro.util.check_model_guide_match', 'check_model_guide_match', (['model_trace', 'guide_trace'], {}), '(model_trace, guide_trace)\n', (19505, 19531), False, 'from pyro.util import check_model_guide_match\n'), ((22813, 22859), 'pyro.infer.Predictive', 'Predictive', (['model'], {'guide': 'guide', 'num_samples': '(10)'}), '(model, guide=guide, num_samples=10)\n', (22823, 22859), False, 'from pyro.infer import SVI, Trace_ELBO, TraceEnum_ELBO, TraceGraph_ELBO, Predictive\n'), ((22864, 22884), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['(0)'], {}), '(0)\n', (22881, 22884), False, 'import pyro\n'), ((23212, 23224), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (23222, 23224), False, 'import io\n'), ((23229, 23265), 'torch.jit.save', 'torch.jit.save', (['traced_predictive', 'f'], {}), '(traced_predictive, f)\n', (23243, 23265), False, 'import torch\n'), ((23303, 23320), 'torch.jit.load', 'torch.jit.load', (['f'], {}), '(f)\n', (23317, 23320), False, 'import torch\n'), ((23325, 23345), 'pyro.set_rng_seed', 'pyro.set_rng_seed', (['(0)'], {}), '(0)\n', (23342, 23345), False, 'import pyro\n'), ((2291, 2320), 'pyro.factor', 'pyro.factor', (['"""f1"""', 'log_factor'], {}), "('f1', log_factor)\n", (2302, 2320), False, 'import pyro\n'), ((2665, 2682), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (2677, 2682), False, 'import torch\n'), ((2774, 2791), 'torch.tensor', 'torch.tensor', (['(5.0)'], {}), '(5.0)\n', (2786, 2791), False, 'import 
torch\n'), ((2856, 2873), 'torch.tensor', 'torch.tensor', (['(4.0)'], {}), '(4.0)\n', (2868, 2873), False, 'import torch\n'), ((4518, 4540), 'pyro.plate', 'pyro.plate', (['"""plate"""', '(3)'], {}), "('plate', 3)\n", (4528, 4540), False, 'import pyro\n'), ((4926, 4945), 'pyro.optim.Adam', 'Adam', (["{'lr': 1e-06}"], {}), "({'lr': 1e-06})\n", (4930, 4945), False, 'from pyro.optim import Adam\n'), ((5604, 5642), 'pyro.infer.autoguide.AutoCallable', 'AutoCallable', (['model', 'guide_x', 'median_x'], {}), '(model, guide_x, median_x)\n', (5616, 5642), False, 'from pyro.infer.autoguide import AutoCallable, AutoDelta, AutoDiagonalNormal, AutoDiscreteParallel, AutoGuide, AutoGuideList, AutoIAFNormal, AutoLaplaceApproximation, AutoLowRankMultivariateNormal, AutoMultivariateNormal, init_to_feasible, init_to_mean, init_to_median, init_to_sample\n'), ((6341, 6373), 'pyro.poutine.block', 'poutine.block', (['model'], {'hide': "['x']"}), "(model, hide=['x'])\n", (6354, 6373), True, 'import pyro.poutine as poutine\n'), ((6558, 6592), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose': "['y']"}), "(model, expose=['y'])\n", (6571, 6592), True, 'import pyro.poutine as poutine\n'), ((6624, 6658), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose': "['y']"}), "(model, expose=['y'])\n", (6637, 6658), True, 'import pyro.poutine as poutine\n'), ((7960, 7977), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (7972, 7977), False, 'import torch\n'), ((8194, 8211), 'torch.tensor', 'torch.tensor', (['(0.5)'], {}), '(0.5)\n', (8206, 8211), False, 'import torch\n'), ((6962, 7029), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_feasible'}), '(AutoDiagonalNormal, init_loc_fn=init_to_feasible)\n', (6979, 7029), False, 'import functools\n'), ((7035, 7098), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_mean'}), '(AutoDiagonalNormal, init_loc_fn=init_to_mean)\n', (7052, 7098), 
False, 'import functools\n'), ((7104, 7169), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_median'}), '(AutoDiagonalNormal, init_loc_fn=init_to_median)\n', (7121, 7169), False, 'import functools\n'), ((7175, 7240), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_sample'}), '(AutoDiagonalNormal, init_loc_fn=init_to_sample)\n', (7192, 7240), False, 'import functools\n'), ((9366, 9391), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (9389, 9391), False, 'import warnings\n'), ((9401, 9468), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'torch.jit.TracerWarning'}), "('ignore', category=torch.jit.TracerWarning)\n", (9424, 9468), False, 'import warnings\n'), ((9559, 9621), 'torch.jit.trace_module', 'torch.jit.trace_module', (['guide', "{'call': ()}"], {'check_trace': '(False)'}), "(guide, {'call': ()}, check_trace=False)\n", (9581, 9621), False, 'import torch\n'), ((10268, 10284), 'operator.attrgetter', 'attrgetter', (['name'], {}), '(name)\n', (10278, 10284), False, 'from operator import attrgetter\n'), ((8489, 8556), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_feasible'}), '(AutoDiagonalNormal, init_loc_fn=init_to_feasible)\n', (8506, 8556), False, 'import functools\n'), ((8562, 8625), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_mean'}), '(AutoDiagonalNormal, init_loc_fn=init_to_mean)\n', (8579, 8625), False, 'import functools\n'), ((8631, 8696), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_median'}), '(AutoDiagonalNormal, init_loc_fn=init_to_median)\n', (8648, 8696), False, 'import functools\n'), ((8702, 8767), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_sample'}), '(AutoDiagonalNormal, init_loc_fn=init_to_sample)\n', (8719, 
8767), False, 'import functools\n'), ((11292, 11338), 'tests.common.assert_equal', 'assert_equal', (['median[name]', 'quantiles[name][1]'], {}), '(median[name], quantiles[name][1])\n', (11304, 11338), False, 'from tests.common import assert_close, assert_equal\n'), ((15168, 15195), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (15181, 15195), False, 'import pytest\n'), ((15868, 15883), 'torch.randn', 'torch.randn', (['()'], {}), '(())\n', (15879, 15883), False, 'import torch\n'), ((15890, 15904), 'torch.randn', 'torch.randn', (['(5)'], {}), '(5)\n', (15901, 15904), False, 'import torch\n'), ((18034, 18053), 'pyro.optim.Adam', 'Adam', (["{'lr': 0.005}"], {}), "({'lr': 0.005})\n", (18038, 18053), False, 'from pyro.optim import Adam\n'), ((18302, 18319), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (18314, 18319), False, 'import torch\n'), ((18370, 18387), 'torch.tensor', 'torch.tensor', (['(2.0)'], {}), '(2.0)\n', (18382, 18387), False, 'import torch\n'), ((17322, 17385), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_mean'}), '(AutoDiagonalNormal, init_loc_fn=init_to_mean)\n', (17339, 17385), False, 'import functools\n'), ((17391, 17456), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_median'}), '(AutoDiagonalNormal, init_loc_fn=init_to_median)\n', (17408, 17456), False, 'import functools\n'), ((19269, 19288), 'pyro.optim.Adam', 'Adam', (["{'lr': 0.005}"], {}), "({'lr': 0.005})\n", (19273, 19288), False, 'from pyro.optim import Adam\n'), ((21140, 21157), 'torch.randn', 'torch.randn', (['N', 'D'], {}), '(N, D)\n', (21151, 21157), False, 'import torch\n'), ((21159, 21173), 'torch.randn', 'torch.randn', (['N'], {}), '(N)\n', (21170, 21173), False, 'import torch\n'), ((21265, 21284), 'pyro.optim.Adam', 'Adam', (["{'lr': 0.005}"], {}), "({'lr': 0.005})\n", (21269, 21284), False, 'from pyro.optim import Adam\n'), ((20072, 20135), 
'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_mean'}), '(AutoDiagonalNormal, init_loc_fn=init_to_mean)\n', (20089, 20135), False, 'import functools\n'), ((20141, 20206), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_median'}), '(AutoDiagonalNormal, init_loc_fn=init_to_median)\n', (20158, 20206), False, 'import functools\n'), ((22494, 22511), 'torch.randn', 'torch.randn', (['N', 'D'], {}), '(N, D)\n', (22505, 22511), False, 'import torch\n'), ((22513, 22527), 'torch.randn', 'torch.randn', (['N'], {}), '(N)\n', (22524, 22527), False, 'import torch\n'), ((22929, 22963), 'pyro.poutine.util.prune_subsample_sites', 'prune_subsample_sites', (['model_trace'], {}), '(model_trace)\n', (22950, 22963), False, 'from pyro.poutine.util import prune_subsample_sites\n'), ((23022, 23047), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (23045, 23047), False, 'import warnings\n'), ((23057, 23124), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'torch.jit.TracerWarning'}), "('ignore', category=torch.jit.TracerWarning)\n", (23080, 23124), False, 'import warnings\n'), ((23153, 23203), 'torch.jit.trace_module', 'torch.jit.trace_module', (['predictive', "{'call': (x,)}"], {}), "(predictive, {'call': (x,)})\n", (23175, 23203), False, 'import torch\n'), ((21526, 21589), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_mean'}), '(AutoDiagonalNormal, init_loc_fn=init_to_mean)\n', (21543, 21589), False, 'import functools\n'), ((21595, 21660), 'functools.partial', 'functools.partial', (['AutoDiagonalNormal'], {'init_loc_fn': 'init_to_median'}), '(AutoDiagonalNormal, init_loc_fn=init_to_median)\n', (21612, 21660), False, 'import functools\n'), ((1396, 1416), 'pyro.poutine.trace', 'poutine.trace', (['guide'], {}), '(guide)\n', (1409, 1416), True, 'import pyro.poutine as poutine\n'), ((2260, 2281), 
'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2271, 2281), True, 'import pyro.distributions as dist\n'), ((2416, 2438), 'pyro.plate', 'pyro.plate', (['"""plate"""', '(3)'], {}), "('plate', 3)\n", (2426, 2438), False, 'import pyro\n'), ((2452, 2481), 'pyro.factor', 'pyro.factor', (['"""f2"""', 'log_factor'], {}), "('f2', log_factor)\n", (2463, 2481), False, 'import pyro\n'), ((3410, 3431), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3421, 3431), True, 'import pyro.distributions as dist\n'), ((3528, 3550), 'pyro.plate', 'pyro.plate', (['"""plate"""', '(3)'], {}), "('plate', 3)\n", (3538, 3550), False, 'import pyro\n'), ((4452, 4469), 'pyro.distributions.Normal', 'dist.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (4463, 4469), True, 'import pyro.distributions as dist\n'), ((4823, 4842), 'pyro.distributions.Bernoulli', 'dist.Bernoulli', (['(0.1)'], {}), '(0.1)\n', (4837, 4842), True, 'import pyro.distributions as dist\n'), ((5095, 5129), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose': "['x']"}), "(model, expose=['x'])\n", (5108, 5129), True, 'import pyro.poutine as poutine\n'), ((5168, 5200), 'pyro.poutine.block', 'poutine.block', (['model'], {'hide': "['x']"}), "(model, hide=['x'])\n", (5181, 5200), True, 'import pyro.poutine as poutine\n'), ((5309, 5326), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (5321, 5326), False, 'import torch\n'), ((5367, 5384), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (5379, 5384), False, 'import torch\n'), ((5443, 5470), 'pyro.distributions.Normal', 'dist.Normal', (['x_loc', 'x_scale'], {}), '(x_loc, x_scale)\n', (5454, 5470), True, 'import pyro.distributions as dist\n'), ((5680, 5712), 'pyro.poutine.block', 'poutine.block', (['model'], {'hide': "['x']"}), "(model, hide=['x'])\n", (5693, 5712), True, 'import pyro.poutine as poutine\n'), ((6493, 6527), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose': 
"['x']"}), "(model, expose=['x'])\n", (6506, 6527), True, 'import pyro.poutine as poutine\n'), ((7403, 7424), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (7414, 7424), True, 'import pyro.distributions as dist\n'), ((7451, 7475), 'pyro.distributions.LogNormal', 'dist.LogNormal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (7465, 7475), True, 'import pyro.distributions as dist\n'), ((7502, 7521), 'pyro.distributions.Beta', 'dist.Beta', (['(2.0)', '(2.0)'], {}), '(2.0, 2.0)\n', (7511, 7521), True, 'import pyro.distributions as dist\n'), ((8135, 8152), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (8147, 8152), False, 'import torch\n'), ((8946, 8967), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (8957, 8967), True, 'import pyro.distributions as dist\n'), ((8982, 9004), 'pyro.plate', 'pyro.plate', (['"""plate"""', '(2)'], {}), "('plate', 2)\n", (8992, 9004), False, 'import pyro\n'), ((10675, 10696), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (10686, 10696), True, 'import pyro.distributions as dist\n'), ((10723, 10747), 'pyro.distributions.LogNormal', 'dist.LogNormal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (10737, 10747), True, 'import pyro.distributions as dist\n'), ((10774, 10793), 'pyro.distributions.Beta', 'dist.Beta', (['(2.0)', '(2.0)'], {}), '(2.0, 2.0)\n', (10783, 10793), True, 'import pyro.distributions as dist\n'), ((12502, 12522), 'pyro.distributions.LogNormal', 'dist.LogNormal', (['(0)', '(1)'], {}), '(0, 1)\n', (12516, 12522), True, 'import pyro.distributions as dist\n'), ((12873, 12914), 'pyro.poutine.block', 'poutine.block', (['model'], {'hide': "['assignment']"}), "(model, hide=['assignment'])\n", (12886, 12914), True, 'import pyro.poutine as poutine\n'), ((12955, 12998), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose': "['assignment']"}), "(model, expose=['assignment'])\n", (12968, 12998), True, 
'import pyro.poutine as poutine\n'), ((13586, 13620), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose': "['x']"}), "(model, expose=['x'])\n", (13599, 13620), True, 'import pyro.poutine as poutine\n'), ((13651, 13685), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose': "['y']"}), "(model, expose=['y'])\n", (13664, 13685), True, 'import pyro.poutine as poutine\n'), ((13952, 13973), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (13963, 13973), True, 'import pyro.distributions as dist\n'), ((14112, 14129), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (14124, 14129), False, 'import torch\n'), ((14155, 14172), 'pyro.distributions.Delta', 'dist.Delta', (['x_loc'], {}), '(x_loc)\n', (14165, 14172), True, 'import pyro.distributions as dist\n'), ((14262, 14296), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose': "['y']"}), "(model, expose=['y'])\n", (14275, 14296), True, 'import pyro.poutine as poutine\n'), ((14621, 14642), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (14632, 14642), True, 'import pyro.distributions as dist\n'), ((14781, 14798), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (14793, 14798), False, 'import torch\n'), ((14828, 14845), 'pyro.distributions.Delta', 'dist.Delta', (['x_loc'], {}), '(x_loc)\n', (14838, 14845), True, 'import pyro.distributions as dist\n'), ((14959, 14993), 'pyro.poutine.block', 'poutine.block', (['model'], {'expose': "['y']"}), "(model, expose=['y'])\n", (14972, 14993), True, 'import pyro.poutine as poutine\n'), ((15745, 15766), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (15756, 15766), True, 'import pyro.distributions as dist\n'), ((16659, 16680), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (16670, 16680), True, 'import pyro.distributions as dist\n'), ((16776, 16800), 'pyro.plate', 'pyro.plate', 
(['"""plate"""', '(100)'], {}), "('plate', 100)\n", (16786, 16800), False, 'import pyro\n'), ((19394, 19414), 'pyro.poutine.trace', 'poutine.trace', (['guide'], {}), '(guide)\n', (19407, 19414), True, 'import pyro.poutine as poutine\n'), ((19445, 19465), 'pyro.poutine.trace', 'poutine.trace', (['model'], {}), '(model)\n', (19458, 19465), True, 'import pyro.poutine as poutine\n'), ((22754, 22774), 'pyro.poutine.trace', 'poutine.trace', (['model'], {}), '(model)\n', (22767, 22774), True, 'import pyro.poutine as poutine\n'), ((1324, 1345), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1335, 1345), True, 'import pyro.distributions as dist\n'), ((1461, 1495), 'pyro.poutine.replay', 'poutine.replay', (['model', 'guide_trace'], {}), '(model, guide_trace)\n', (1475, 1495), True, 'import pyro.poutine as poutine\n'), ((3676, 3690), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (3687, 3690), False, 'import torch\n'), ((3692, 3704), 'torch.eye', 'torch.eye', (['(2)'], {}), '(2)\n', (3701, 3704), False, 'import torch\n'), ((3748, 3761), 'torch.ones', 'torch.ones', (['(3)'], {}), '(3)\n', (3758, 3761), False, 'import torch\n'), ((4848, 4863), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4860, 4863), False, 'import torch\n'), ((5534, 5551), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (5546, 5551), False, 'import torch\n'), ((5911, 5928), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (5923, 5928), False, 'import torch\n'), ((5966, 5983), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (5978, 5983), False, 'import torch\n'), ((9035, 9059), 'pyro.distributions.LogNormal', 'dist.LogNormal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (9049, 9059), True, 'import pyro.distributions as dist\n'), ((9090, 9109), 'pyro.distributions.Beta', 'dist.Beta', (['(2.0)', '(2.0)'], {}), '(2.0, 2.0)\n', (9099, 9109), True, 'import pyro.distributions as dist\n'), ((12699, 12724), 
'pyro.distributions.Categorical', 'dist.Categorical', (['weights'], {}), '(weights)\n', (12715, 12724), True, 'import pyro.distributions as dist\n'), ((12757, 12793), 'pyro.distributions.Normal', 'dist.Normal', (['locs[assignment]', 'scale'], {}), '(locs[assignment], scale)\n', (12768, 12793), True, 'import pyro.distributions as dist\n'), ((13490, 13504), 'torch.zeros', 'torch.zeros', (['(5)'], {}), '(5)\n', (13501, 13504), False, 'import torch\n'), ((13506, 13521), 'torch.eye', 'torch.eye', (['(5)', '(5)'], {}), '(5, 5)\n', (13515, 13521), False, 'import torch\n'), ((14022, 14036), 'torch.zeros', 'torch.zeros', (['(5)'], {}), '(5)\n', (14033, 14036), False, 'import torch\n'), ((14038, 14053), 'torch.eye', 'torch.eye', (['(5)', '(5)'], {}), '(5, 5)\n', (14047, 14053), False, 'import torch\n'), ((14691, 14705), 'torch.zeros', 'torch.zeros', (['(5)'], {}), '(5)\n', (14702, 14705), False, 'import torch\n'), ((14707, 14722), 'torch.eye', 'torch.eye', (['(5)', '(5)'], {}), '(5, 5)\n', (14716, 14722), False, 'import torch\n'), ((15314, 15331), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (15326, 15331), False, 'import torch\n'), ((15815, 15829), 'torch.zeros', 'torch.zeros', (['(5)'], {}), '(5)\n', (15826, 15829), False, 'import torch\n'), ((15831, 15846), 'torch.eye', 'torch.eye', (['(5)', '(5)'], {}), '(5, 5)\n', (15840, 15846), False, 'import torch\n'), ((16729, 16743), 'torch.zeros', 'torch.zeros', (['(5)'], {}), '(5)\n', (16740, 16743), False, 'import torch\n'), ((16745, 16760), 'torch.eye', 'torch.eye', (['(5)', '(5)'], {}), '(5, 5)\n', (16754, 16760), False, 'import torch\n'), ((16831, 16852), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (16842, 16852), True, 'import pyro.distributions as dist\n'), ((17710, 17727), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (17722, 17727), False, 'import torch\n'), ((17765, 17782), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (17777, 17782), 
False, 'import torch\n'), ((17863, 17900), 'pyro.distributions.Normal', 'dist.Normal', (['self.x_loc', 'self.x_scale'], {}), '(self.x_loc, self.x_scale)\n', (17874, 17900), True, 'import pyro.distributions as dist\n'), ((17931, 17952), 'pyro.distributions.Normal', 'dist.Normal', (['(2.0)', '(0.1)'], {}), '(2.0, 0.1)\n', (17942, 17952), True, 'import pyro.distributions as dist\n'), ((18641, 18658), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (18653, 18658), False, 'import torch\n'), ((18696, 18713), 'torch.tensor', 'torch.tensor', (['(0.1)'], {}), '(0.1)\n', (18708, 18713), False, 'import torch\n'), ((18794, 18831), 'pyro.distributions.Normal', 'dist.Normal', (['self.x_loc', 'self.x_scale'], {}), '(self.x_loc, self.x_scale)\n', (18805, 18831), True, 'import pyro.distributions as dist\n'), ((18850, 18872), 'pyro.plate', 'pyro.plate', (['"""plate"""', '(2)'], {}), "('plate', 2)\n", (18860, 18872), False, 'import pyro\n'), ((20988, 21012), 'pyro.distributions.LogNormal', 'dist.LogNormal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (21002, 21012), True, 'import pyro.distributions as dist\n'), ((21029, 21051), 'pyro.plate', 'pyro.plate', (['"""plate"""', 'N'], {}), "('plate', N)\n", (21039, 21051), False, 'import pyro\n'), ((22342, 22366), 'pyro.distributions.LogNormal', 'dist.LogNormal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (22356, 22366), True, 'import pyro.distributions as dist\n'), ((22383, 22405), 'pyro.plate', 'pyro.plate', (['"""plate"""', 'N'], {}), "('plate', N)\n", (22393, 22405), False, 'import pyro\n'), ((2524, 2538), 'torch.zeros', 'torch.zeros', (['(3)'], {}), '(3)\n', (2535, 2538), False, 'import torch\n'), ((2540, 2553), 'torch.ones', 'torch.ones', (['(3)'], {}), '(3)\n', (2550, 2553), False, 'import torch\n'), ((3594, 3608), 'torch.zeros', 'torch.zeros', (['(3)'], {}), '(3)\n', (3605, 3608), False, 'import torch\n'), ((3610, 3623), 'torch.ones', 'torch.ones', (['(3)'], {}), '(3)\n', (3620, 3623), False, 'import torch\n'), ((6104, 
6141), 'pyro.distributions.Normal', 'dist.Normal', (['self.x_loc', 'self.x_scale'], {}), '(self.x_loc, self.x_scale)\n', (6115, 6141), True, 'import pyro.distributions as dist\n'), ((8055, 8073), 'torch.tensor', 'torch.tensor', (['(-1.0)'], {}), '(-1.0)\n', (8067, 8073), False, 'import torch\n'), ((12367, 12380), 'torch.ones', 'torch.ones', (['K'], {}), '(K)\n', (12377, 12380), False, 'import torch\n'), ((13408, 13429), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (13419, 13429), True, 'import pyro.distributions as dist\n'), ((18907, 18928), 'pyro.distributions.Normal', 'dist.Normal', (['(2.0)', '(0.1)'], {}), '(2.0, 0.1)\n', (18918, 18928), True, 'import pyro.distributions as dist\n'), ((21095, 21119), 'pyro.distributions.Normal', 'dist.Normal', (['mean', 'sigma'], {}), '(mean, sigma)\n', (21106, 21119), True, 'import pyro.distributions as dist\n'), ((22449, 22473), 'pyro.distributions.Normal', 'dist.Normal', (['mean', 'sigma'], {}), '(mean, sigma)\n', (22460, 22473), True, 'import pyro.distributions as dist\n'), ((1245, 1266), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1256, 1266), True, 'import pyro.distributions as dist\n'), ((2359, 2373), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (2370, 2373), False, 'import torch\n'), ((2375, 2388), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (2385, 2388), False, 'import torch\n'), ((3471, 3485), 'torch.zeros', 'torch.zeros', (['(2)'], {}), '(2)\n', (3482, 3485), False, 'import torch\n'), ((3487, 3500), 'torch.ones', 'torch.ones', (['(2)'], {}), '(2)\n', (3497, 3500), False, 'import torch\n'), ((3826, 3841), 'torch.arange', 'torch.arange', (['(2)'], {}), '(2)\n', (3838, 3841), False, 'import torch\n'), ((4718, 4735), 'pyro.distributions.Normal', 'dist.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (4729, 4735), True, 'import pyro.distributions as dist\n'), ((12418, 12436), 'pyro.distributions.Normal', 'dist.Normal', (['(0)', 
'(10)'], {}), '(0, 10)\n', (12429, 12436), True, 'import pyro.distributions as dist\n'), ((4588, 4605), 'pyro.distributions.Normal', 'dist.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (4599, 4605), True, 'import pyro.distributions as dist\n'), ((20556, 20577), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (20567, 20577), True, 'import pyro.distributions as dist\n'), ((20660, 20682), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (20671, 20682), True, 'import pyro.distributions as dist\n'), ((21910, 21931), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (21921, 21931), True, 'import pyro.distributions as dist\n'), ((22014, 22036), 'pyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (22025, 22036), True, 'import pyro.distributions as dist\n'), ((3790, 3807), 'pyro.distributions.Normal', 'dist.Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (3801, 3807), True, 'import pyro.distributions as dist\n')] |
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
__date__ = "05/2019"
__license__ = "MIT"
import cv2, pytesseract, imutils, logging
import numpy as np
# NOTE(review): compare_ssim was deprecated and later removed from
# skimage.measure (newer releases expose skimage.metrics.structural_similarity)
# -- this import pins the project to an older scikit-image version.
from skimage.measure import compare_ssim as ssim
from consts import *
# Root logger (empty name) shared by every helper in this module.
logger = logging.getLogger('')
def sort_contours(cnts, method="left-to-right"):
    """Sort contours along one image axis.

    Adapted from
    https://www.pyimagesearch.com/2015/04/20/sorting-contours-using-python-and-opencv/

    Arguments:
        cnts {list} -- contours as returned by cv2.findContours
    Keyword Arguments:
        method {str} -- one of "left-to-right", "right-to-left",
            "top-to-bottom", "bottom-to-top" (default: {"left-to-right"})
    Returns:
        tuple -- (sorted contours, matching bounding boxes)
    """
    # Sort descending when scanning from the far edge of the image.
    descending = method in ("right-to-left", "bottom-to-top")
    # Axis 0 of a bounding box is x, axis 1 is y.
    axis = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    boxes = [cv2.boundingRect(contour) for contour in cnts]
    ordered = sorted(zip(cnts, boxes), key=lambda pair: pair[1][axis], reverse=descending)
    cnts, boxes = zip(*ordered)
    return (cnts, boxes)
def preprocess_image(image):
    """Prepare an image for contour detection.

    Grayscale conversion, heavy blur, inverted Otsu binarisation and
    dilation merge text blocks into near-rectangular blobs.

    Arguments:
        image {cv2.image} -- CV2 image object
    Returns:
        cv2.image -- Processed CV2 image object
    """
    gray = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (15, 15), 0)
    # Otsu picks the threshold automatically; INV makes text foreground white.
    _, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # Three dilation passes fuse neighbouring glyphs into one blob.
    dilated = cv2.dilate(binary, np.ones((7, 7), np.uint8), iterations=3)
    return dilated
def preprocess_roi(image, padding=0, scaling_factor=1):
    """Prepare a region of interest for text recognition with Tesseract.

    Based on: https://docs.opencv.org/3.4.0/d7/d4d/tutorial_py_thresholding.html
    Grayscale + cubic upscaling + Otsu binarisation + white border.

    Arguments:
        image {cv2.image} -- cv2 image
    Keyword Arguments:
        padding {int} -- padding from all image sides (default: {0})
        scaling_factor {int} -- resize factor (default: {1})
    Returns:
        cv2.image -- processed cv2 image object
    """
    height, width = image.shape[:2]
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    scaled = cv2.resize(gray, (width * scaling_factor, height * scaling_factor), interpolation=cv2.INTER_CUBIC)
    smoothed = cv2.GaussianBlur(scaled, (3, 3), 0)
    _, binary = cv2.threshold(smoothed, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # White padding keeps characters away from the image border for the OCR.
    return cv2.copyMakeBorder(binary, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=(255, 255, 255))
def find_countours(image):
    """Find external contours on a preprocessed (binary) image.

    Arguments:
        image {cv2.image} -- preprocessed binary image
    Returns:
        tuple -- contours sorted top-to-bottom
    """
    # Bug fix: the original copied the image twice (image.copy() and then
    # proc_image.copy() again) -- one copy is enough to protect the caller's
    # image, since old OpenCV versions modify findContours input in place.
    cntrs = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cntrs = imutils.grab_contours(cntrs)
    cntrs = sort_contours(cntrs, method="top-to-bottom")[0]
    return cntrs
def draw_countrous(image, cntrs):
    """Draw the bounding rectangle of each contour on a copy of the image.

    Arguments:
        image {cv2.image} -- original image
        cntrs {list} -- contours object
    Returns:
        cv2.image -- copy of the image with green rectangles drawn on it
    """
    annotated = image.copy()
    for contour in cntrs:
        x, y, width, height = cv2.boundingRect(contour)
        cv2.rectangle(annotated, (x, y), (x + width, y + height), (0, 255, 0), 2)
    return annotated
def get_rois(image, cntrs):
    """Extract regions of interest from the image based on contours.

    The original (unprocessed) image shall be provided.

    Arguments:
        image {cv2.image} -- cv2 image object
        cntrs {list} -- contours
    Returns:
        list -- (roi image, (y0, y1, x0, x1)) tuples
    """
    rois = []
    for contour in cntrs:
        x, y, width, height = cv2.boundingRect(contour)
        crop = image[y:y + height, x:x + width]
        rois.append((crop, (y, y + height, x, x + width)))
    return rois
def recognise_rois(rois, padding=0, scaling_factor=1, debug=False):
    """Recognize text on the image ROIs with Google Tesseract.

    Collection only starts AFTER a ROI containing TRIGGER_WORD has been
    seen; the triggering ROI itself is NOT collected, because
    ``triggered`` is tested before it is updated at the end of the loop.

    Arguments:
        rois {list} -- (roi image, position) tuples as produced by get_rois
    Keyword Arguments:
        padding {int} -- padding from all four sides of the image (default: {0})
        scaling_factor {int} -- rescaling factor to resize image (default: {1})
        debug {bool} -- dump each processed ROI to DEBUG_FOLDER (default: {False})
    Returns:
        list -- (text, position) tuples for the collected ROIs
    """
    values = []
    triggered = False
    for i in range(len(rois)):
        r, pos = rois[i]
        image = preprocess_roi(r, padding, scaling_factor)
        text = pytesseract.image_to_string(image, config=TESSERACT_CONF)
        if triggered:
            logger.debug("ROI {}: {}".format(i, text))
            if debug:
                cv2.imwrite("%s/%d.png"%(DEBUG_FOLDER,i), image)
            values.append((text, pos))
            # Stop once twice as many ROIs as expected keys were collected
            # (presumably one key plus one value per field -- TODO confirm).
            if len(values) == len(EXPECTED_KEYS)*2:
                break
        # Arm collection for subsequent iterations once the trigger word shows up.
        triggered = triggered or str(text).lower().find(TRIGGER_WORD) > -1
    if not triggered:
        logger.warning("Trigger not found")
    return values
def get_video_n_frames(filename):
    """Return (frame count, frames per second) reported for a video file.

    Arguments:
        filename {str} -- path to the video file
    Returns:
        (int, int) -- number of frames and FPS
    """
    capture = cv2.VideoCapture(filename)
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    frames_per_second = int(capture.get(cv2.CAP_PROP_FPS))
    capture.release()
    return frame_count, frames_per_second
def open_video_file(filename):
    """Open a video file and return the cv2.VideoCapture handle (caller must release it)."""
    return cv2.VideoCapture(filename)
def get_video_frame(filename, frame_n):
    """Seek to frame ``frame_n`` of a video file and return it.

    Arguments:
        filename {str} -- path to the video file
        frame_n {int} -- zero-based index of the requested frame
    Returns:
        (int, numpy.ndarray) -- (actual frame position, frame image);
            the image may be None when reading fails
    """
    vidcap = cv2.VideoCapture(filename)
    vidcap.set(cv2.CAP_PROP_POS_FRAMES, frame_n)
    # Bug fix: property id 0 is CAP_PROP_POS_MSEC (position in milliseconds),
    # not the frame index. Read back CAP_PROP_POS_FRAMES so the returned
    # position reflects the frame the capture actually seeked to.
    frame_n = int(vidcap.get(cv2.CAP_PROP_POS_FRAMES))
    _, frame = vidcap.read()
    vidcap.release()
    return frame_n, frame
def get_video_frames(filename, output="images", frames=None, middle_frame=None, tolerance=0.98, middle_tolerance=0.7):
    """Get individual frames from the video file, skipping images which
    are similar by structural similarity (SSIM) by more than tolerance.

    Arguments:
        filename {str} -- path to video file
    Keyword Arguments:
        output {str} -- output folder path (default: {"images"})
        frames {iterable} -- explicit frame indices to grab; when None all
            frames are read sequentially (default: {None})
        middle_frame {cv2.image} -- reference frame; frames too dissimilar
            from it are skipped (default: {None})
        tolerance {float} -- tolerance to skip similar images (default: {0.98})
        middle_tolerance {float} -- minimum similarity to middle_frame
            (default: {0.7})
    Returns:
        (int, list) -- (number frames, file names)
    """
    # simple_read == True: read the stream sequentially (no per-frame seek);
    # a failed read then means end-of-stream rather than a transient error.
    simple_read = False
    if frames is None:
        n_frames, _ = get_video_n_frames(filename)
        frames = range(n_frames)
        simple_read = True
    vidcap = cv2.VideoCapture(filename)
    last_image = None
    frame_list = []
    count = 0
    for f in frames:
        save_fn = "%s/frame%d.png"%(output,f)
        if not simple_read:
            # Explicit frame list: seek before every read.
            vidcap.set(cv2.CAP_PROP_POS_FRAMES,f)
        success, image = vidcap.read()
        if not success:
            if simple_read:
                break
            else:
                logger.warning("Failed reading frame %d"%f)
                continue
        # Drop frames that do not resemble the reference frame enough.
        if middle_frame is not None and ssim(middle_frame, image, multichannel=True) < middle_tolerance:
            continue
        # Keep the frame only if it differs enough from the last kept one.
        if last_image is None or ssim(last_image, image, multichannel=True) < tolerance:
            frame_list.append(save_fn)
            count += 1
            cv2.imwrite(save_fn, image)
            last_image = image.copy()
    vidcap.release()
    return count, frame_list
"cv2.resize",
"cv2.GaussianBlur",
"skimage.measure.compare_ssim",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.threshold",
"cv2.copyMakeBorder",
"numpy.ones",
"pytesseract.image_to_string",
"cv2.VideoCapture",
"cv2.rectangle",
"imutils.grab_contours",
"cv2.boundingRect",
"logging.ge... | [((307, 328), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (324, 328), False, 'import cv2, pytesseract, imutils, logging\n'), ((1341, 1384), 'cv2.cvtColor', 'cv2.cvtColor', (['new_image', 'cv2.COLOR_BGR2GRAY'], {}), '(new_image, cv2.COLOR_BGR2GRAY)\n', (1353, 1384), False, 'import cv2, pytesseract, imutils, logging\n'), ((1401, 1441), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['new_image', '(15, 15)', '(0)'], {}), '(new_image, (15, 15), 0)\n', (1417, 1441), False, 'import cv2, pytesseract, imutils, logging\n'), ((1462, 1535), 'cv2.threshold', 'cv2.threshold', (['new_image', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(new_image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (1475, 1535), False, 'import cv2, pytesseract, imutils, logging\n'), ((1621, 1646), 'numpy.ones', 'np.ones', (['(7, 7)', 'np.uint8'], {}), '((7, 7), np.uint8)\n', (1628, 1646), True, 'import numpy as np\n'), ((1662, 1705), 'cv2.dilate', 'cv2.dilate', (['new_image', 'kernel'], {'iterations': '(3)'}), '(new_image, kernel, iterations=3)\n', (1672, 1705), False, 'import cv2, pytesseract, imutils, logging\n'), ((2243, 2282), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (2255, 2282), False, 'import cv2, pytesseract, imutils, logging\n'), ((2293, 2419), 'cv2.resize', 'cv2.resize', (['roi', '(image.shape[:2][1] * scaling_factor, image.shape[:2][0] * scaling_factor)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(roi, (image.shape[:2][1] * scaling_factor, image.shape[:2][0] *\n scaling_factor), interpolation=cv2.INTER_CUBIC)\n', (2303, 2419), False, 'import cv2, pytesseract, imutils, logging\n'), ((2423, 2455), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['roi', '(3, 3)', '(0)'], {}), '(roi, (3, 3), 0)\n', (2439, 2455), False, 'import cv2, pytesseract, imutils, logging\n'), ((2465, 2529), 'cv2.threshold', 'cv2.threshold', (['blur', '(0)', '(255)', '(cv2.THRESH_BINARY + 
cv2.THRESH_OTSU)'], {}), '(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (2478, 2529), False, 'import cv2, pytesseract, imutils, logging\n'), ((2535, 2643), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['roi', 'padding', 'padding', 'padding', 'padding', 'cv2.BORDER_CONSTANT'], {'value': '(255, 255, 255)'}), '(roi, padding, padding, padding, padding, cv2.\n BORDER_CONSTANT, value=(255, 255, 255))\n', (2553, 2643), False, 'import cv2, pytesseract, imutils, logging\n'), ((3027, 3055), 'imutils.grab_contours', 'imutils.grab_contours', (['cntrs'], {}), '(cntrs)\n', (3048, 3055), False, 'import cv2, pytesseract, imutils, logging\n'), ((5296, 5322), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (5312, 5322), False, 'import cv2, pytesseract, imutils, logging\n'), ((5513, 5539), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (5529, 5539), False, 'import cv2, pytesseract, imutils, logging\n'), ((5594, 5620), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (5610, 5620), False, 'import cv2, pytesseract, imutils, logging\n'), ((6528, 6554), 'cv2.VideoCapture', 'cv2.VideoCapture', (['filename'], {}), '(filename)\n', (6544, 6554), False, 'import cv2, pytesseract, imutils, logging\n'), ((848, 867), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (864, 867), False, 'import cv2, pytesseract, imutils, logging\n'), ((3477, 3496), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (3493, 3496), False, 'import cv2, pytesseract, imutils, logging\n'), ((3505, 3569), 'cv2.rectangle', 'cv2.rectangle', (['out_image', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(out_image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (3518, 3569), False, 'import cv2, pytesseract, imutils, logging\n'), ((3949, 3968), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (3965, 3968), False, 'import cv2, pytesseract, imutils, logging\n'), ((4754, 4811), 
'pytesseract.image_to_string', 'pytesseract.image_to_string', (['image'], {'config': 'TESSERACT_CONF'}), '(image, config=TESSERACT_CONF)\n', (4781, 4811), False, 'import cv2, pytesseract, imutils, logging\n'), ((7262, 7289), 'cv2.imwrite', 'cv2.imwrite', (['save_fn', 'image'], {}), '(save_fn, image)\n', (7273, 7289), False, 'import cv2, pytesseract, imutils, logging\n'), ((4927, 4978), 'cv2.imwrite', 'cv2.imwrite', (["('%s/%d.png' % (DEBUG_FOLDER, i))", 'image'], {}), "('%s/%d.png' % (DEBUG_FOLDER, i), image)\n", (4938, 4978), False, 'import cv2, pytesseract, imutils, logging\n'), ((7013, 7057), 'skimage.measure.compare_ssim', 'ssim', (['middle_frame', 'image'], {'multichannel': '(True)'}), '(middle_frame, image, multichannel=True)\n', (7017, 7057), True, 'from skimage.measure import compare_ssim as ssim\n'), ((7132, 7174), 'skimage.measure.compare_ssim', 'ssim', (['last_image', 'image'], {'multichannel': '(True)'}), '(last_image, image, multichannel=True)\n', (7136, 7174), True, 'from skimage.measure import compare_ssim as ssim\n')] |
from kernel_tuner import tune_kernel
import numpy
import argparse
import json
def generate_code(tuning_parameters):
    """Generate the CUDA source of the fct_ale_b1_vertical kernel.

    One thread block processes one node; threads stride over its vertical
    levels reading fct_adf_v from global memory.

    Arguments:
        tuning_parameters {dict} -- must contain "block_size_x", "tiling_x",
            "real_type" ("float" or "double") and "int_type"
            ("int" or "unsigned_int")
    Returns:
        str -- the generated CUDA source code
    Raises:
        ValueError -- when "real_type" is neither "float" nor "double"
    """
    kernel_template = (
        "__global__ void fct_ale_b1_vertical(const int maxLevels, const int * __restrict__ nLevels, const <%REAL_TYPE%> * __restrict__ fct_adf_v, <%REAL_TYPE%> * __restrict__ fct_plus, <%REAL_TYPE%> * __restrict__ fct_minus)\n"
        "{\n"
        "const <%INT_TYPE%> node = (blockIdx.x * maxLevels);\n"
        "const <%INT_TYPE%> maxNodeLevel = nLevels[blockIdx.x] - 1;\n"
        "\n"
        "for ( <%INT_TYPE%> level = threadIdx.x; level < maxNodeLevel; level += <%BLOCK_SIZE%> )\n"
        "{\n"
        "<%REAL_TYPE%> fct_adf_v_level = 0.0;\n"
        "<%REAL_TYPE%> fct_adf_v_nlevel = 0.0;\n"
        "<%COMPUTE_BLOCK%>"
        "}\n"
        "}\n"
    )
    compute_template = (
        "fct_adf_v_level = fct_adf_v[node + level + <%OFFSET%>];\n"
        "fct_adf_v_nlevel = fct_adf_v[node + (level + 1) + <%OFFSET%>];\n"
        "fct_plus[node + level + <%OFFSET%>] = <%FMAX%>(0.0, fct_adf_v_level) + <%FMAX%>(0.0, -fct_adf_v_nlevel);\n"
        "fct_minus[node + level + <%OFFSET%>] = <%FMIN%>(0.0, fct_adf_v_level) + <%FMIN%>(0.0, -fct_adf_v_nlevel);\n"
    )
    block_size = tuning_parameters["block_size_x"]
    tiling = tuning_parameters["tiling_x"]
    # The loop stride covers all tiles handled by one thread.
    step = block_size * tiling if tiling > 1 else block_size
    code = kernel_template.replace("<%BLOCK_SIZE%>", str(step))
    parts = []
    for tile in range(tiling):
        if tile == 0:
            # First tile needs no offset and no bounds check.
            parts.append(compute_template.replace(" + <%OFFSET%>", ""))
        else:
            offset = block_size * tile
            parts.append("if ( level + {} < maxNodeLevel )\n{{\n{}}}\n".format(str(offset), compute_template.replace("<%OFFSET%>", str(offset))))
    code = code.replace("<%COMPUTE_BLOCK%>", "".join(parts))
    real_type = tuning_parameters["real_type"]
    if real_type == "float":
        fmax_name, fmin_name = "fmaxf", "fminf"
    elif real_type == "double":
        fmax_name, fmin_name = "fmax", "fmin"
    else:
        raise ValueError
    code = code.replace("<%FMAX%>", fmax_name).replace("<%FMIN%>", fmin_name)
    code = code.replace("<%INT_TYPE%>", tuning_parameters["int_type"].replace("_", " "))
    return code.replace("<%REAL_TYPE%>", real_type)
def generate_code_shared(tuning_parameters):
    """Generate the shared-memory CUDA source of the fct_ale_b1_vertical kernel.

    Variant of generate_code that first stages fct_adf_v for the node into
    shared memory, synchronizes, and then computes fct_plus/fct_minus from
    the staged values.

    Arguments:
        tuning_parameters {dict} -- must contain "block_size_x", "tiling_x",
            "real_type" ("float" or "double") and "int_type"
            ("int" or "unsigned_int")
    Returns:
        str -- the generated CUDA source code
    Raises:
        ValueError -- when "real_type" is neither "float" nor "double"
    """
    kernel_template = (
        "__global__ void fct_ale_b1_vertical(const int maxLevels, const int * __restrict__ nLevels, const <%REAL_TYPE%> * __restrict__ fct_adf_v, <%REAL_TYPE%> * __restrict__ fct_plus, <%REAL_TYPE%> * __restrict__ fct_minus)\n"
        "{\n"
        "const <%INT_TYPE%> node = (blockIdx.x * maxLevels);\n"
        "const <%INT_TYPE%> maxNodeLevel = nLevels[blockIdx.x];\n"
        "extern __shared__ <%REAL_TYPE%> fct_adf_v_local[];\n"
        "\n"
        "for ( <%INT_TYPE%> level = threadIdx.x; level < maxNodeLevel; level += <%BLOCK_SIZE%> )\n"
        "{\n"
        "<%LOAD_BLOCK%>"
        "}\n"
        "__syncthreads();\n"
        "for ( <%INT_TYPE%> level = threadIdx.x; level < maxNodeLevel - 1; level += <%BLOCK_SIZE%> )\n"
        "{\n"
        "<%COMPUTE_BLOCK%>"
        "}\n"
        "}\n"
    )
    load_template = (
        "fct_adf_v_local[level + <%OFFSET%>] = fct_adf_v[node + level + <%OFFSET%>];\n"
    )
    compute_template = (
        "fct_plus[node + level + <%OFFSET%>] = <%FMAX%>(0.0, fct_adf_v_local[level + <%OFFSET%>]) + <%FMAX%>(0.0, -fct_adf_v_local[level + <%OFFSET%> + 1]);\n"
        "fct_minus[node + level + <%OFFSET%>] = <%FMIN%>(0.0, fct_adf_v_local[level + <%OFFSET%>]) + <%FMIN%>(0.0, -fct_adf_v_local[level + <%OFFSET%> + 1]);\n"
    )
    block_size = tuning_parameters["block_size_x"]
    tiling = tuning_parameters["tiling_x"]
    # The loop stride covers all tiles handled by one thread.
    step = block_size * tiling if tiling > 1 else block_size
    code = kernel_template.replace("<%BLOCK_SIZE%>", str(step))
    load_parts = []
    compute_parts = []
    for tile in range(tiling):
        if tile == 0:
            # First tile needs no offset and no bounds check.
            load_parts.append(load_template.replace(" + <%OFFSET%>", ""))
            compute_parts.append(compute_template.replace(" + <%OFFSET%>", ""))
        else:
            offset = block_size * tile
            load_parts.append("if ( level + {} < maxNodeLevel )\n{{\n{}}}\n".format(str(offset), load_template.replace("<%OFFSET%>", str(offset))))
            compute_parts.append("if ( level + {} < maxNodeLevel - 1 )\n{{\n{}}}\n".format(str(offset), compute_template.replace("<%OFFSET%>", str(offset))))
    code = code.replace("<%LOAD_BLOCK%>", "".join(load_parts))
    code = code.replace("<%COMPUTE_BLOCK%>", "".join(compute_parts))
    real_type = tuning_parameters["real_type"]
    if real_type == "float":
        fmax_name, fmin_name = "fmaxf", "fminf"
    elif real_type == "double":
        fmax_name, fmin_name = "fmax", "fmin"
    else:
        raise ValueError
    code = code.replace("<%FMAX%>", fmax_name).replace("<%FMIN%>", fmin_name)
    code = code.replace("<%INT_TYPE%>", tuning_parameters["int_type"].replace("_", " "))
    return code.replace("<%REAL_TYPE%>", real_type)
def reference(nodes, levels, max_levels, fct_adf_v, fct_plus, fct_minus):
    """CPU reference implementation of the fct_ale_b1_vertical kernel.

    Fills fct_plus and fct_minus in place for every node and every level
    below the node's top active level (levels[node] - 1 entries per node);
    entries beyond that are left untouched.

    Arguments:
        nodes {int} -- number of nodes
        levels {sequence} -- active level count per node
        max_levels {int} -- stride between consecutive nodes in the arrays
        fct_adf_v {sequence} -- input vertical fluxes, length nodes*max_levels
        fct_plus {sequence} -- output array, written in place
        fct_minus {sequence} -- output array, written in place
    """
    for node_index in range(nodes):
        base = node_index * max_levels
        for level in range(levels[node_index] - 1):
            item = base + level
            flux_here = fct_adf_v[item]
            flux_below = fct_adf_v[item + 1]
            # Direct assignment: equivalent to zeroing first and accumulating.
            fct_plus[item] = max(0.0, flux_here) + max(0.0, -flux_below)
            fct_minus[item] = min(0.0, flux_here) + min(0.0, -flux_below)
def tune(nodes, max_levels, max_tile, real_type, quiet=True):
    """Auto-tune the fct_ale_b1_vertical kernel with Kernel Tuner.

    Tunes both the global-memory variant (generate_code) and the
    shared-memory variant (generate_code_shared) against a CPU reference,
    annotates each configuration with its achieved memory bandwidth
    (bytes/s) and returns the merged result list.

    Arguments:
        nodes {int} -- number of nodes (one CUDA block per node)
        max_levels {int} -- maximum vertical levels per node
        max_tile {int} -- exclusive upper bound for the tiling factor
        real_type {str} -- "float" or "double"
    Keyword Arguments:
        quiet {bool} -- suppress per-configuration output (default: {True})
    Returns:
        list -- tuning result dicts from both kernel variants
    Raises:
        ValueError -- when real_type is neither "float" nor "double"
    """
    numpy_real_type = None
    if real_type == "float":
        numpy_real_type = numpy.float32
    elif real_type == "double":
        numpy_real_type = numpy.float64
    else:
        raise ValueError
    # Tuning and code generation parameters
    tuning_parameters = dict()
    tuning_parameters["shared_memory"] = [False]
    tuning_parameters["int_type"] = ["unsigned_int", "int"]
    tuning_parameters["real_type"] = [real_type]
    tuning_parameters["max_levels"] = [str(max_levels)]
    tuning_parameters["block_size_x"] = [32 * i for i in range(1, 33)]
    tuning_parameters["tiling_x"] = [i for i in range(1, max_tile)]
    constraints = list()
    constraints.append("block_size_x * tiling_x <= max_levels")
    # Memory allocation and initialization
    fct_adf_v = numpy.random.randn(nodes * max_levels).astype(numpy_real_type)
    fct_plus = numpy.zeros(nodes * max_levels).astype(numpy_real_type)
    fct_minus = numpy.zeros_like(fct_plus).astype(numpy_real_type)
    fct_plus_control = numpy.zeros_like(fct_plus).astype(numpy_real_type)
    fct_minus_control = numpy.zeros_like(fct_minus).astype(numpy_real_type)
    levels = numpy.zeros(nodes).astype(numpy.int32)
    # used_levels counts the entries each kernel actually writes; it feeds
    # the bandwidth estimate below.
    used_levels = 0
    for node in range(0, nodes):
        levels[node] = numpy.random.randint(3, max_levels)
        used_levels = used_levels + (levels[node] - 1)
    arguments = [numpy.int32(max_levels), levels, fct_adf_v, fct_plus, fct_minus]
    # Reference
    reference(nodes, levels, max_levels, fct_adf_v, fct_plus_control, fct_minus_control)
    # None entries: only the last two arguments are checked against the reference.
    arguments_control = [None, None, None, fct_plus_control, fct_minus_control]
    # Tuning
    results, _ = tune_kernel("fct_ale_b1_vertical", generate_code, "{} * block_size_x".format(nodes), arguments, tuning_parameters, lang="CUDA", answer=arguments_control, restrictions=constraints, quiet=quiet)
    # Memory bandwidth
    # 4 bytes per node for nLevels; 4 accesses per used level (read level,
    # read level+1, write plus, write minus) in the global-memory variant.
    memory_bytes = ((nodes * 4) + (used_levels * 4 * numpy.dtype(numpy_real_type).itemsize))
    for result in results:
        # time is reported in milliseconds; bandwidth ends up in bytes/s.
        result["memory_bandwidth"] = memory_bytes / (result["time"] / 10**3)
    # Shared memory version
    shared_memory_args = dict()
    tuning_parameters["shared_memory"] = [True]
    shared_memory_args["size"] = max_levels * numpy.dtype(numpy_real_type).itemsize
    results_shared, _ = tune_kernel("fct_ale_b1_vertical", generate_code_shared, "{} * block_size_x".format(nodes), arguments, tuning_parameters, smem_args=shared_memory_args, lang="CUDA", answer=arguments_control, restrictions=constraints, quiet=quiet)
    # Memory bandwidth shared memory version
    # Only 3 global accesses per used level here: the extra read is served
    # from shared memory.
    memory_bytes = ((nodes * 4) + (used_levels * 3 * numpy.dtype(numpy_real_type).itemsize))
    for result in results_shared:
        result["memory_bandwidth"] = memory_bytes / (result["time"] / 10**3)
    return results + results_shared
def parse_command_line():
    """Build and evaluate the command line arguments of the tuner.

    Returns:
        argparse.Namespace with the parsed options.
    """
    cli = argparse.ArgumentParser(description="FESOM2 FCT ALE B1 VERTICAL")
    # Mandatory problem-size arguments.
    cli.add_argument("--nodes", help="The number of nodes.",
                     type=int, required=True)
    cli.add_argument("--max_levels", help="The maximum number of vertical levels per node.",
                     type=int, required=True)
    # Tuning knobs.
    cli.add_argument("--max_tile", help="The maximum tiling factor.",
                     type=int, default=2)
    cli.add_argument("--real_type", help="The floating point type to use.",
                     choices=["float", "double"], type=str, required=True)
    # NOTE: passing --verbose actually flips the flag to False (store_false on
    # a True default); kept as-is for backward compatibility.
    cli.add_argument("--verbose", help="Print all kernel configurations.",
                     default=True, action="store_false")
    cli.add_argument("--store", help="Store performance results in a JSON file.",
                     default=False, action="store_true")
    return cli.parse_args()
if __name__ == "__main__":
    command_line = parse_command_line()
    # Run the tuner; returns the merged results of the plain and the
    # shared-memory kernel versions.
    results = tune(command_line.nodes, command_line.max_levels, command_line.max_tile, command_line.real_type, command_line.verbose)
    # Report the fastest configuration found (lowest kernel time).
    best_configuration = min(results, key=lambda x : x["time"])
    print("/* Memory bandwidth: {:.2f} GB/s */".format(best_configuration["memory_bandwidth"] / 10**9))
    print("/* Block size X: {} */".format(best_configuration["block_size_x"]))
    if best_configuration["shared_memory"]:
        print(generate_code_shared(best_configuration))
    else:
        print(generate_code(best_configuration))
    if command_line.store:
        try:
            # "x" mode fails if the file already exists, preventing an
            # accidental overwrite of earlier experiment results.
            with open("fct_ale_b1_vertical_{}_{}_{}.json".format(command_line.nodes, command_line.max_levels, command_line.real_type), "x") as fp:
                json.dump(results, fp)
        except FileExistsError:
            print("Impossible to save the results, a results file already exists for a similar experiment.")
"json.dump",
"numpy.zeros_like",
"argparse.ArgumentParser",
"numpy.random.randn",
"numpy.dtype",
"numpy.zeros",
"numpy.random.randint",
"numpy.int32"
] | [((8833, 8898), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""FESOM2 FCT ALE B1 VERTICAL"""'}), "(description='FESOM2 FCT ALE B1 VERTICAL')\n", (8856, 8898), False, 'import argparse\n'), ((7261, 7296), 'numpy.random.randint', 'numpy.random.randint', (['(3)', 'max_levels'], {}), '(3, max_levels)\n', (7281, 7296), False, 'import numpy\n'), ((7369, 7392), 'numpy.int32', 'numpy.int32', (['max_levels'], {}), '(max_levels)\n', (7380, 7392), False, 'import numpy\n'), ((6782, 6820), 'numpy.random.randn', 'numpy.random.randn', (['(nodes * max_levels)'], {}), '(nodes * max_levels)\n', (6800, 6820), False, 'import numpy\n'), ((6860, 6891), 'numpy.zeros', 'numpy.zeros', (['(nodes * max_levels)'], {}), '(nodes * max_levels)\n', (6871, 6891), False, 'import numpy\n'), ((6932, 6958), 'numpy.zeros_like', 'numpy.zeros_like', (['fct_plus'], {}), '(fct_plus)\n', (6948, 6958), False, 'import numpy\n'), ((7006, 7032), 'numpy.zeros_like', 'numpy.zeros_like', (['fct_plus'], {}), '(fct_plus)\n', (7022, 7032), False, 'import numpy\n'), ((7081, 7108), 'numpy.zeros_like', 'numpy.zeros_like', (['fct_minus'], {}), '(fct_minus)\n', (7097, 7108), False, 'import numpy\n'), ((7146, 7164), 'numpy.zeros', 'numpy.zeros', (['nodes'], {}), '(nodes)\n', (7157, 7164), False, 'import numpy\n'), ((8216, 8244), 'numpy.dtype', 'numpy.dtype', (['numpy_real_type'], {}), '(numpy_real_type)\n', (8227, 8244), False, 'import numpy\n'), ((7918, 7946), 'numpy.dtype', 'numpy.dtype', (['numpy_real_type'], {}), '(numpy_real_type)\n', (7929, 7946), False, 'import numpy\n'), ((8606, 8634), 'numpy.dtype', 'numpy.dtype', (['numpy_real_type'], {}), '(numpy_real_type)\n', (8617, 8634), False, 'import numpy\n'), ((10412, 10434), 'json.dump', 'json.dump', (['results', 'fp'], {}), '(results, fp)\n', (10421, 10434), False, 'import json\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
from . import select_descriptor
from ..IO import pkl_data
from ..IO import read_input as rin
def initialize(stat, init_struc_data, rslt_data):
    """Set up the first generation of the Bayesian-optimization search.

    Args:
        stat: ConfigParser-like status object (has set()/write()), rewritten to 'cryspy.stat'.
        init_struc_data (dict): initial structures; must not contain None values.
        rslt_data (pandas.DataFrame): results table to be (re)initialized.

    Side effects: appends to 'cryspy.out', rewrites 'cryspy.stat', and saves
    pickled BO state via pkl_data.
    """
    # ---------- log
    print('\n# ---------- Initialize Bayesian optimization')
    with open('cryspy.out', 'a') as fout:
        # NOTE(review): "Initilalize" typo below is runtime output text,
        # deliberately left unchanged here.
        fout.write('\n# ---------- Initilalize Bayesian optimization\n')
    # ---------- check init_struc_data
    if None in init_struc_data.values():
        raise ValueError('init_struc_data includes None')
    # ---------- initialize
    gen = 1
    # No structures evaluated yet: empty ID and target arrays.
    id_done = np.array([], dtype=int)
    targets = np.array([], dtype=float)
    non_error_id = np.arange(len(init_struc_data))
    # ---------- rslt_data
    # Add the 'Gen' column and reorder columns to the canonical layout.
    rslt_data['Gen'] = pd.Series(dtype=int)
    rslt_data = rslt_data[['Gen', 'Struc_ID', 'Spg_num', 'Spg_sym', 'Spg_num_opt',
                           'Spg_sym_opt', 'Energy', 'Magmom', 'Opt']]
    pkl_data.save_rslt(rslt_data)
    # ---------- random select
    # Pick rin.interval structure IDs at random for the first batch.
    id_to_calc = random_select(len(init_struc_data), rin.interval)
    # ---------- calc descriptor
    descriptors = select_descriptor.calc_X(init_struc_data)
    # ---------- save for BO
    bo_id_data = (gen, non_error_id, id_to_calc, id_done)
    pkl_data.save_bo_id(bo_id_data)
    bo_data = (descriptors, targets)
    pkl_data.save_bo_data(bo_data)
    # ---------- status
    stat.set('status', 'generation', '{}'.format(gen))
    stat.set('status', 'selected_id', '{}'.format(' '.join(str(a) for a in id_to_calc)))
    stat.set('status', 'id_to_calc', '{}'.format(' '.join(str(a) for a in id_to_calc)))
    with open('cryspy.stat', 'w') as fstat:
        stat.write(fstat)
    # ---------- out and log
    print('Generation: {}'.format(gen))
    print('selected_id: {}'.format(' '.join(str(a) for a in id_to_calc)))
    with open('cryspy.out', 'a') as fout:
        fout.write('Generation: {}\n'.format(gen))
        fout.write('selected_id: {}\n\n'.format(' '.join(str(a) for a in id_to_calc)))
def random_select(length, n):
    """Randomly pick *n* distinct structure IDs out of ``range(length)``.

    Args:
        length (int): total number of candidate structures.
        n (int): how many IDs to select.

    Returns:
        numpy.ndarray: ``n`` distinct integers in ``[0, length)``.
    """
    # np.random.permutation(int) permutes range(length) directly; the previous
    # code passed xrange(length), which raises NameError under Python 3.
    rnd_perm = np.random.permutation(length)
    return rnd_perm[0:n]
| [
"numpy.array",
"pandas.Series"
] | [((662, 685), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (670, 685), True, 'import numpy as np\n'), ((700, 725), 'numpy.array', 'np.array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (708, 725), True, 'import numpy as np\n'), ((828, 848), 'pandas.Series', 'pd.Series', ([], {'dtype': 'int'}), '(dtype=int)\n', (837, 848), True, 'import pandas as pd\n')] |
# Copyright (c) 2019 <NAME> <<EMAIL>>
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import numpy as np
class ReplAtoms():
    '''
    Enumeration of strategies for replacing unused dictionary atoms.

    ZERO   -- replace with an all-zero column
    RANDOM -- replace with a freshly generated random atom
    NO     -- keep the atom unchanged
    WORST  -- replace with the worst represented training signal
    '''
    ZERO, RANDOM, NO, WORST = range(4)
def _new_atom(Y, D, X, atom_index, replatoms):
    '''
    Produce a replacement for the unused dictionary atom ``atom_index``.
    INPUTS:
      Y -- training signals set
      D -- current dictionary
      X -- sparse representations
      atom_index -- atom's column index in the dictionary D
      replatoms -- ReplAtoms strategy (ZERO, RANDOM, NO or WORST)
    OUTPUTS:
      atom -- replacement atom (1-D array of length D.shape[0])
    '''
    n_features = D.shape[0]
    if replatoms == ReplAtoms.ZERO:
        # An all-zero column effectively disables the atom.
        return np.zeros(n_features)
    elif replatoms == ReplAtoms.RANDOM:
        # Draw a random direction and normalize it to unit length.
        candidate = np.random.rand(n_features)
        return candidate / np.linalg.norm(candidate)
    elif replatoms == ReplAtoms.NO:
        # Leave the dictionary column untouched.
        return D[:, atom_index]
    elif replatoms == ReplAtoms.WORST:
        # Pick the training signal with the largest residual, renormalized.
        residual = Y - D @ X
        worst = np.argmax(np.linalg.norm(residual, axis=0))
        return Y[:, worst] / np.linalg.norm(Y[:, worst])
| [
"numpy.random.rand",
"numpy.linalg.norm",
"numpy.zeros"
] | [((1749, 1769), 'numpy.zeros', 'np.zeros', (['D.shape[0]'], {}), '(D.shape[0])\n', (1757, 1769), True, 'import numpy as np\n'), ((1865, 1891), 'numpy.random.rand', 'np.random.rand', (['D.shape[0]'], {}), '(D.shape[0])\n', (1879, 1891), True, 'import numpy as np\n'), ((1914, 1934), 'numpy.linalg.norm', 'np.linalg.norm', (['atom'], {}), '(atom)\n', (1928, 1934), True, 'import numpy as np\n'), ((2193, 2218), 'numpy.linalg.norm', 'np.linalg.norm', (['E'], {'axis': '(0)'}), '(E, axis=0)\n', (2207, 2218), True, 'import numpy as np\n'), ((2249, 2276), 'numpy.linalg.norm', 'np.linalg.norm', (['Y[:, index]'], {}), '(Y[:, index])\n', (2263, 2276), True, 'import numpy as np\n')] |
# Copyright <NAME> 2012-2020.
# Copyright <NAME> 2020. Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE.txt)
## @package testdriver
# This is the Python testdriver for the \ref testsuite.
#
# It is intended to be used with the CMake CTest utility.
# When called with the parameter `--testclass=<TESTCLASS>`, it calls the `run`
# method of the specified runner class. Success of a test is indicated by the
# return value 0.
import logging
from optparse import OptionParser
import configparser
import sys
import os
import errno
import subprocess
import numpy as np
import shutil
import ast
import scipy.interpolate
from scipy.integrate import quadrature
# Plotting support is optional: degrade gracefully when matplotlib is missing.
try:
  import matplotlib
  # 'Agg' backend renders to files only; no display required (CI-friendly).
  matplotlib.use('Agg')
  import matplotlib.pyplot as plt
  from matplotlib.backends.backend_pdf import PdfPages
  from matplotlib.font_manager import FontProperties
  plot=True
except ImportError:
  plot=False
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s")
# Silence matplotlib's very chatty font-manager debug messages.
logging.getLogger('matplotlib.font_manager').disabled = True
## @name Helper functions
# @{
## Create a directory with parent directories.
# @param path The path to create.
#
# From this [stackoverflow question](http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python)
def mkdir_p(path):
  """Create *path* like ``mkdir -p``: parents included, no error if present."""
  # exist_ok makes repeated calls idempotent.
  os.makedirs(path, exist_ok=True)
## Remove a file without error if it doesn't exist.
# @param filename The file to delete.
#
# From this [stackoverflow question](http://stackoverflow.com/a/10840586)
def rm_f(filename):
  """Remove *filename* like ``rm -f``: no error if the file does not exist.

  \param filename The file to delete.

  Any OSError other than a missing file still propagates.
  """
  try:
    os.remove(filename)
  except FileNotFoundError:
    # Equivalent to the old errno.ENOENT check, but idiomatic Python 3.
    pass
## Loads a trajectory file.
# \param fname File name to load from.
# \return array Numpy array.
def load_sv(fname, format=None):
  """!
  Load a trajectory file into a numpy array.
  \param fname File name to load from.
  \param format Optional column template: 'f' marks a float column, 'c' a
      complex "(re,im)" column and '+' the whitespace separator (e.g. "f+f+c").
      If None, whitespace-separated floats are assumed (numpy.genfromtxt).
  \return Numpy array; each 'c' column yields two float columns (real, imag).
  """
  if format is None: return np.genfromtxt(fname)
  # Regex for a (possibly signed) floating point literal with optional exponent.
  floatingReString=r'([-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?)'
  # Regex for a complex number printed as "( re , im )".
  complexReString =r'\(\s*'+floatingReString+'\s*,\s*'+floatingReString+'\s*\)'
  # Expand the template into one big line regex and let numpy parse the file.
  return np.fromregex(fname,format.replace(r'+',r'\s*').replace('f',floatingReString).replace('c',complexReString),float)
## @}
def PTLA_postprocess(input):
  """Expand a 5-column PTLA trajectory into 6 columns.

  Columns 0-1 are copied; column 2 (value v) is split into (1+v)/2 and
  (1-v)/2; columns 3-4 are halved.
  """
  v = input[:, 2]
  return np.column_stack((input[:, 0:2],
                          (1 + v) / 2,
                          (1 - v) / 2,
                          input[:, 3] / 2,
                          input[:, 4] / 2))
def PLM_Evolved_postprocess(input):
  """Expand a 4-column PLM trajectory into 5 columns.

  Columns 0-1 are copied; column 2 becomes the squared modulus
  c2**2 + c3**2 of the pair (c2, c3); the pair itself follows.
  """
  c2 = input[:, 2]
  c3 = input[:, 3]
  return np.column_stack((input[:, 0:2], c2 ** 2 + c3 ** 2, c2, c3))
## @defgroup TestclassHelpers Helpers
# @ingroup Testclasses
# \brief Helper base classes to test classes.
# These classes cannot be used as a test class directly, but serve as base to other test classes
# and define some \ref TestclassKeys "configuration file keys" and \ref TestclassOptions "command line options".
class OptionsManager(object):
  """!
  @ingroup TestclassHelpers
  \brief Stores command line options and configuration file keys.
  Each OptionsManager instance has its own section in the configuration file, named after
  the current test name (OptionsManager::test). If the current section has the key
  `import=othersection`, import all keys from `othersection` if they are not present already
  (works recursively). Values which end in `_local` are never imported.
  \ref OptionsManager_options "Command line" options this class understands.
  """
  ## @addtogroup TestclassOptions
  #
  # @anchor OptionsManager_options
  # ## OptionsManager command line options
  # * `--test=<testname>`: The name of the test. This defines the section in the configuration file
  #     and also ends up in output files etc.
  def __init__(self, options, cp):
    """!
    @param options optparse.Values: object holding all the command line options.
    @param cp ConfigParser: ConfigParser instance holding all configuration file keys.
    """
    ## optparse.Values: command line options
    self.options = options
    ## ConfigParser: configuration file keys
    self.cp = cp
    ## The name of the current test
    self.test = options.test
    # A test name is mandatory: it selects the configuration section.
    if not self.test: sys.exit('--test missing')
  def _import_section(self,section=None):
    # Resolve an 'import=<othersection>' key by copying keys from the imported
    # section (recursively), skipping keys already present and '*_local' keys.
    if section is None: section = self.test
    if self.cp.has_option(section,'import'):
      import_section=self.cp.get(section,'import')
      self._import_section(section=import_section) # import recursively
      for item in self.cp.items(import_section):
        if not self.cp.has_option(section,item[0]) and not item[0].endswith('_local'):
          self.cp.set(section, *item)
      # Consume the 'import' key so repeated calls are idempotent.
      self.cp.remove_option(section,'import')
  def get_option(self, name, default=None, required=False, section=None):
    """!
    Get configuration file keys in a safe way.
    \param name Name of the key.
    \param default Default value to return if key does not exist.
    \param required Fail if True and key does not exist.
    \param section The section name to look in, defaults to OptionsManager::test if None.
    \return The value to the key.
    This methods looks up the key `name` in the section name OptionsManager::test.
    """
    if section is None: section=self.test
    # Make sure any 'import' directive has been resolved before the lookup.
    self._import_section(section=section)
    if self.cp.has_option(section,name):
      return self.cp.get(section,name)
    else:
      if not required: return default
      else: sys.exit("Error: required option \"{0}\" not found in section {1}.".format(name,section))
class OutputManager(OptionsManager):
  """!
  @ingroup TestclassHelpers
  \brief Manages output files for different run modes.
  \ref OutputManager_keys "Configuration file keys" this class understands.
  """
  ## @addtogroup SetupKeys
  #
  # * `outputdir`: All output files end up here.
  # * `expecteddir`: Where to look for pre-run simulations to compare test runs to.
  ## @addtogroup TestclassKeys
  #
  # @anchor OutputManager_keys
  # ## OutputManager configuration file keys
  # * `runmodes`: comma separated list of runmodes (single, master ensemble)
  def __init__(self, *args, **kwargs):
    """!
    Arguments are passed through to OptionsManager.
    """
    OptionsManager.__init__(self, *args, **kwargs)
    ## All output files end up here.
    self.outputdir = self.cp.get('Setup','outputdir')
    ## Where to look for pre-run simulations to compare test runs to.
    self.expecteddir = self.cp.get('Setup','expecteddir')
    # Side effect: the output directory is created eagerly on construction.
    mkdir_p(self.outputdir)
  def runmodes(self,section=None):
    """!
    Return runmodes.
    \param section (optional) String: Where to look up the runmodes, take current test section if not specified.
    \return A list of runmodes in this section.
    """
    if section is None: section=self.test
    return self.get_option('runmodes', section=section, default='generic').split(',')
  def _filter_runmodes(self, section):
    # Generator over the section's runmodes, restricted to the optional
    # 'runmodes_<test>_local' whitelist of the current test if present.
    filter_runmodes=self.get_option('runmodes_'+self.test+'_local',section=section)
    if not filter_runmodes is None: filter_runmodes=filter_runmodes.split(',')
    for mode in self.runmodes(section=section):
      if not filter_runmodes is None and not mode in filter_runmodes: continue
      yield(mode)
  def output(self, runmode, section=None, statefile=False):
    """!
    The name of the output file for a given runmode.
    \param runmode String: The runmode for which the filename should be generated.
    \param section (optional) String: Output file name for which section, current test section if left empty.
    \param statefile (optional) Boolean: By default generate the file name for a trajectory file. If set to true
      generate the file name for a state file.
    \return Full path including OutputManager::outputdir.
    """
    if section is None: section=self.test
    if runmode == "generic":
      output = os.path.join(self.outputdir, section)
    else:
      output = os.path.join(self.outputdir, section+'_'+runmode)
    if statefile: output+=".state"
    return output
  def clean(self, runmode):
    """!
    Delete the trajectory file and state file for a given runmode.
    \param runmode String: The runmode for which output files should be deleted.
    """
    rm_f(self.output(runmode))
    rm_f(self.output(runmode,statefile=True))
# The test classes
class Runner(OutputManager):
  """!
  @ingroup Testclasses
  Runs a script repeatedly for all declared runmodes and succeeds if the scripts do.
  \ref Runner_keys "Configuration file keys" this class understands.
  """
  def run(self, clean=True, extra_opts=None, interpreter=None, *args, **kwargs):
    """!
    The method to run the test.
    \param clean (optional) `Boolean`: Whether to remove old output before running the test.
    \param extra_opts (optional) `List`: Additional command line options appended to the script call.
    \param interpreter (optional) `str`: Interpreter to run the command through, e.g. `python`.
    \param args passed through to `subprocess.call`
    \param kwargs passed through to `subprocess.call`
    This method terminates the test driver with a return value equal to that of the script call
    if one of the scripts fail.
    """
    for runmode in self.runmodes():
      if clean: self.clean(runmode)
      command = self._build_commandline(runmode,extra_opts,interpreter)
      logging.debug(subprocess.list2cmdline(command))
      ret = subprocess.call(command, *args, **kwargs)
      # Abort the whole test driver on the first failing script.
      if not ret==0: sys.exit(ret)
  ## @addtogroup TestclassKeys
  #
  # @anchor Runner_keys
  # ## Runner configuration file keys
  # * `opts*`: The command line options used for running the script, multiple keys matching `opts*` can be given
  # * `single*`, `master*`, `ensemble*`: Additional options for the specific runmodes. Multiple keys
  #     matching `<runmode>*` can be given.
  #
  # Example usage:
  #
  #     # The options used for running the scripts, multiple keys can be given if they match opts*
  #     opts=--etat 8 --sdf 3
  #     opts1=--dc 0 --Dt 0.1 --NDt 10
  #
  #     # runmode specific options
  #     single=...
  #     single1=...
  #     ensemble=...
  #     master=...
  def _extend_opts(self, options, section, option_prefix):
    # Append (in sorted key order) all whitespace-split values of the keys in
    # `section` whose names start with `option_prefix`.
    for option in sorted([ item[0] for item in self.cp.items(section) if item[0].startswith(option_prefix)]):
      options.extend(self.cp.get(section,option).split())
  def _build_commandline(self, runmode, extra_opts=None, interpreter=None):
    # Assemble: [interpreter] script [extra_opts] [Setup opts] [test opts]
    # [runmode opts] [--evol runmode] --o outputfile
    result = [interpreter] if not interpreter is None else []
    result.append(self.options.script)
    if extra_opts: result+=extra_opts
    ## @addtogroup SetupKeys
    #
    # * `opts`: Script command line options added to all scripts
    self._extend_opts(result, 'Setup','opts')
    self._extend_opts(result, self.test,'opts')
    self._extend_opts(result, self.test,runmode)
    if not runmode=="generic": result.extend(('--evol',runmode))
    result.extend(('--o',self.output(runmode)))
    return result
class PythonRunner(Runner):
  """!
  @ingroup Testclasses
  Runs a cpypyqed script repeatedly for all declared runmodes and succeeds if the scripts do.
  \ref PythonRunner_options "Configuration file keys" this class understands.
  """
  ## @addtogroup TestclassOptions
  #
  # @anchor PythonRunner_options
  # ## PythonRunner command line options
  # * `--cpypyqed_builddir=<dir>`: Directory for on-demand compilation
  # * `--cpypyqed_config=<config-file>`: Configuration file for on-demand compilation
  def run(self, clean=True, extra_opts=None, *args, **kwargs):
    """!
    The method to run the test.
    \param clean (optional) `Boolean`: Whether to remove old output before running the test.
    \param extra_opts (optional) `List`: Additional command line options appended to the script call.
    \param args passed through to Runner.run()
    \param kwargs passed through to Runner.run()
    This method terminates the test driver with a return value equal to that of the script call
    if one of the scripts fail.
    """
    cpypyqed_builddir = self.options.cpypyqed_builddir
    cpypyqed_config = self.options.cpypyqed_config
    # Propagate the on-demand compilation settings through the environment.
    env = os.environ.copy()
    if cpypyqed_builddir:
      env['CPYPYQED_BUILDDIR']=cpypyqed_builddir
      # Remove previously compiled modules so the test starts from scratch.
      if clean: shutil.rmtree(os.path.join(cpypyqed_builddir,'cppqedmodules'),ignore_errors=True)
    if cpypyqed_config: env['CPYPYQED_CONFIG']=cpypyqed_config
    env['PYTHONPATH']=self.cp.get('Setup','modulepath')
    if extra_opts is None: extra_opts = []
    # NOTE(review): += mutates a caller-supplied list in place; a caller
    # reusing the same list would accumulate '--debug' entries — confirm.
    if self.options.configuration.lower()=="debug": extra_opts += ['--debug']
    # Run the script through the current Python interpreter.
    Runner.run(self,clean=clean,extra_opts=extra_opts,interpreter=sys.executable,env=env,*args,**kwargs)
class Verifier(OutputManager):
  """!
  @ingroup Testclasses
  Verifies the output of a script 'this' to an expected output or the output of some other test run 'other'
  \ref Verifier_keys "Configuration file keys" this class understands.
  """
  ## @addtogroup TestclassKeys
  #
  # @anchor Verifier_keys
  # ## Verifier configuration file keys
  # The Verifier compares some test 'this' to another test 'other'.
  # * `this`: Test name of 'this', by default the current test if missing
  # * `other`: Testname of 'other', by default the results from the directory of expected results
  #     (OutputManager::expecteddir)
  # * `verify`: Verify that both trajectories are exactly equal (default if this key is missing or
  #     `verify=full`), or verify that the last outcome of the simulation is equal, e.g. timesteps may differ
  #     (`verify=outcome`)
  #
  # If `this=some_test` is specified, it is probably also a good idea to `import=some_test` to keep
  # the runmodes in sync. Currently the directory of expected results is `Testing/expected`, it is kept
  # under version control so that changes in the output of the scripts are noticed.
  def __init__(self,*args,**kwargs):
    """!
    \param args passed through to OutputManager
    \param kwargs passed through to OutputManager
    """
    OutputManager.__init__(self,*args,**kwargs)
    self.thisSection = self.get_option('this',default=self.test)
    self.otherSection = self.get_option('other')
  def run(self):
    """!
    Run the test.
    """
    mode=self.get_option('verify')
    if mode is None or mode=='full':
      self._verify_full()
    elif mode=='outcome':
      self._verify_outcome()
  def _verify_full(self):
    # Compare both the trajectory file and the state file for every runmode.
    for runmode in self.runmodes(section=self.thisSection):
      self._verify_ev(self._thisOutput(runmode),self._otherOutput(runmode))
      self._verify_state(self._thisOutput(runmode,statefile=True),self._otherOutput(runmode,statefile=True))
  def _thisOutput(self,runmode,statefile=False):
    return self.output(runmode,section=self.thisSection,statefile=statefile)
  def _otherOutput(self,runmode,statefile=False):
    # Without an 'other' section, compare against the pre-recorded expected file.
    if self.otherSection is None:
      return os.path.join(self.expecteddir,os.path.basename(self._thisOutput(runmode,statefile)))
    else:
      return self.output(runmode,section=self.otherSection,statefile=statefile)
  def _differ(self,this,other):
    # Failure terminates the whole test driver.
    sys.exit("Error: {0} and {1} differ.".format(this,other))
  def _equiv(self,this,other):
    logging.debug("{0} and {1} are equivalent.".format(this,other))
  def _verify_ev(self,this,other):
    if not np.allclose(load_sv(this),load_sv(other)): self._differ(this,other)
    else: self._equiv(this,other)
  def _verify_state(self,this,other):
    # NOTE(review): 'io' here cannot be the stdlib io module (it has no
    # read()); presumably a project-local state-file reader returning
    # (meta, states, times) — confirm the corresponding import exists.
    _,r_state,r_time = io.read(this)
    _,e_state,e_time = io.read(other)
    if not (np.allclose(r_state,e_state) and np.allclose(r_time,e_time)): self._differ(this,other)
    else: self._equiv(this,other)
  def _verify_outcome(self,this,other):
    _,r_state,r_time=io.read(this)
    _,e_state,e_time=io.read(other)
    # Only the final state and final time have to agree in 'outcome' mode.
    if not (np.allclose(r_state[-1],e_state[-1]) and np.allclose(r_time[-1],e_time[-1])):
      self._differ(this,other)
    else: self._equiv(this,other)
class VerifiedRunner(Runner,Verifier):
  """!
  @ingroup Testclasses
  Combines the functionality of Runner and Verifier to a single test.
  """
  def run(self):
    """!
    Execute the scripts (Runner) and then check their output (Verifier).
    """
    # Explicit base-class dispatch: first produce the output, then verify it.
    for phase in (Runner.run, Verifier.run):
      phase(self)
class GenericContinuer(OptionsManager):
  """!
  @ingroup TestclassHelpers
  This class hosts continued_run(), which will run and then continue a script.
  """
  ## @addtogroup TestclassKeys
  #
  # @anchor GenericContinuer_keys
  # ## GenericContinuer configuration file keys
  # * `firstrun`: script options for the first run
  # * `secondrun`: script options for the second run
  def continued_run(self, runfn, *args, **kwargs):
    """!
    Run a script twice: a fresh first run, then a continuation run.
    \param runfn Function: The run function to call.
    \param args passed through to `runfn`
    \param kwargs passed through to `runfn`
    """
    # First run starts from a clean slate.
    first_opts = self.get_option('firstrun', default='').split()
    runfn(self, extra_opts=first_opts, *args, **kwargs)
    # Second run must keep the existing output so it continues from the state file.
    second_opts = self.get_option('secondrun', default='').split()
    runfn(self, clean=False, extra_opts=second_opts, *args, **kwargs)
class Continuer(Runner, GenericContinuer):
  """!
  @ingroup Testclasses
  GenericContinuer version of Runner.
  \ref GenericContinuer_keys "Configuration file keys" this class understands.
  """
  ## @addtogroup TestclassKeys
  #
  # @anchor Continuer
  # ## Continuer configuration file keys
  # See \ref GenericContinuer_keys "GenericContinuer keys".
  def run(self, *args, **kwargs):
    """!
    Run the script twice via GenericContinuer::continued_run(), using
    Runner.run for both invocations.
    """
    run_once = Runner.run
    GenericContinuer.continued_run(self, run_once, *args, **kwargs)
class PythonContinuer(PythonRunner, GenericContinuer):
  """!
  @ingroup Testclasses
  GenericContinuer version of PythonRunner.
  \ref GenericContinuer_keys "Configuration file keys" this class understands.
  """
  ## @addtogroup TestclassKeys
  #
  # @anchor PythonContinuer
  # ## PythonContinuer configuration file keys
  # See \ref GenericContinuer_keys "GenericContinuer keys".
  def run(self, *args, **kwargs):
    """!
    Run the cpypyqed script twice via GenericContinuer::continued_run(),
    using PythonRunner.run for both invocations.
    """
    run_once = PythonRunner.run
    GenericContinuer.continued_run(self, run_once, *args, **kwargs)
class CompileTarget(OptionsManager):
  """!
  @ingroup Testclasses
  \brief This test tries to compile a %CMake target.
  If the `--error` option is not given,
  the test succeeds if the target can be compiled, otherwise the test succeeds if the
  compilation fails and the string specified together with `--error` is found.
  \ref CompileTarget_options "Command line options" this class understands.
  """
  ## @addtogroup SetupKeys
  #
  # * `cmake`: Path of the cmake executable
  # * `builddir`: Top-level build directory
  ## @addtogroup TestclassOptions
  #
  # @anchor CompileTarget_options
  # ## CompileTarget command line options
  # * `--script`: The name of the target to compile.
  ## @addtogroup TestclassKeys
  #
  # @anchor CompileTarget_keys
  # ## CompileTarget configuration file keys
  # * `error`: Turn on "failure mode". The error message which is expected in the output.
  # * `dependencies`: Space separated list of dependencies to compile first. These are
  #     always required to succeed, independent of the presence of `error`.
  def run(self):
    """!
    Runs the test.
    """
    error=self.get_option('error')
    cmake=self.cp.get('Setup','cmake')
    builddir=self.cp.get('Setup','builddir')
    command=[cmake,'--build',builddir,'--target']
    dependencies=self.get_option('dependencies',default="").split()
    # Dependencies must always compile, independent of the expected-error mode.
    for dep in dependencies:
      logging.debug(subprocess.list2cmdline(command+[dep]))
      # universal_newlines=True decodes stdout/stderr to str; without it the
      # later 'error in std' substring test raises TypeError (str vs bytes)
      # under Python 3.
      p = subprocess.Popen(command+[dep], stdout=subprocess.PIPE,stderr=subprocess.PIPE,universal_newlines=True)
      (std,err) = p.communicate()
      if not p.returncode==0:
        sys.exit("Compilation of dependency {0} for {1} failed.".format(dep,self.options.script))
    logging.debug(subprocess.list2cmdline(command+[self.options.script]))
    p = subprocess.Popen(command+[self.options.script], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    (std,err) = p.communicate()
    returncode = p.returncode
    if error is None:
      # Normal mode: the target has to compile cleanly.
      if returncode != 0:
        sys.exit("Compilation of {0} failed.".format(self.options.script))
    else:
      # Failure mode: compilation must fail AND emit the expected message.
      if returncode == 0:
        sys.exit("Compilation was successful, but failure was expected.")
      if (not error in std) and (not error in err):
        logging.debug(std)
        logging.debug(err)
        sys.exit("Compilation failed as expected, but \"{0}\" was not found in the error message.".format(error))
class Plotter(OutputManager):
  """!
  \brief This is a helper class which helps with plotting functions to a pdf file.
  If the global variable `plot` is False, all functions are a no-op.
  """
  def _plot(self):
    # Plot only when matplotlib imported successfully AND a 'pdf' key is set.
    return plot and not self.get_option('pdf') is None
  def start_pdf(self):
    """!
    \brief Initialize a new pdf file.
    The file is read from the configuration key `pdf`.
    """
    if not self._plot(): return
    self.pdf = PdfPages(os.path.join(self.outputdir,self.get_option('pdf')))
  def close_pdf(self):
    """!
    \brief Saves the pdf file to disc after all plots are finished.
    """
    if not self._plot(): return
    # Finalize every open figure: attach the legend, then write it to the pdf.
    for n in plt.get_fignums():
      plt.figure(num=n)
      self._place_legend()
      self.finish_plot()
    self.pdf.close()
  def finish_plot(self):
    """!
    \brief Adds the current plot to the pdf file.
    """
    if not self._plot(): return
    self.pdf.savefig()
    plt.close()
  def figureLegendRight(self,ylabel,title,n):
    """!
    \brief Creates a new plot with figure legend right of the plot.
    \param ylabel The label of the y axis.
    \param title The title of the plot
    \param n The value number.
    """
    if not self._plot(): return
    # Reuse the figure if it already exists; only create and decorate it once.
    if n in plt.get_fignums():
      plt.figure(num=n)
      return
    f = plt.figure(num=n,figsize=(11.6,8.2))
    # Leave space on the right-hand side for the figure legend.
    f.add_axes([0.09, 0.1, 0.6, 0.75])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('t')
  def _place_legend(self):
    if not self._plot(): return
    # Small font, anchored just outside the axes on the right.
    fontP = FontProperties()
    fontP.set_size('small')
    leg=plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.,prop = fontP)
    llines=leg.get_lines()
    plt.setp(llines, linewidth=1.5)
  def plot(self,time,data,**kwargs):
    """!
    \brief Wraps matplotlibs plot function.
    \param time An array of time values.
    \param data An array of data values.
    \param **kwargs These are passed to `matplotlib.plot`.
    """
    if not self._plot(): return
    plt.plot(time,data,**kwargs)
def final_temperature(nTh):
  """!
  Return a test function measuring the distance of the final state from a
  thermal state with mean excitation number *nTh*.
  \param nTh Mean excitation number of the expected thermal state.
  \return Function mapping a list of states to the Frobenius distance of the
      last state from the diagonal thermal density matrix
      rho_nn = nTh**n / (1+nTh)**(n+1).
  """
  def fn(states):
    state=states[-1]
    n=np.arange(state.shape[0],dtype=float)
    # Bug fix: the denominator was hard-coded as (1.+4)**(n+1), which is only
    # correct for nTh == 4; use the nTh parameter so the check generalizes.
    expected_rho=np.diag(nTh**n/(1.+nTh)**(n+1))
    return np.sqrt(np.sum(np.abs(state-expected_rho)**2))
  return fn
class StateComparer(OutputManager):
  """!
  @ingroup Testclasses
  Tests final states of several trajectories by applying a given function.
  \ref StateComparer_keys "Configuration file keys" this class understands.
  """
  ## @addtogroup TestclassKeys
  #
  # @anchor StateComparer_keys
  # ## StateComparer configuration file keys
  # * `trajectories`: List of comma-separated trajectories which should be tested.
  # * `function`: A meta-function which should return the actual test function. The actual test function
  #    should accept the state array and return some epsilon value (the measure of the test).
  # * `parameters`: Tuple of function parameters passed to the meta function.
  #
  # The following configuration keys are read from the 'target'-sections.
  # * `runmodes_<test>`: For the compare test <test>, only use these runmodes.
  # * `epsilon_<runmode>_<test>`: Acceptable deviation for the given runmode and comparison test.
  def run(self):
    """!
    Runs the test: fails (sys.exit) if any trajectory's measure exceeds its epsilon.
    """
    trajectories=self.get_option('trajectories',required=True).split(',')
    # The 'function' key names a module-level meta-function (e.g. final_temperature).
    function=globals()[self.get_option('function',required=True)]
    # 'parameters' is parsed safely as a Python literal (tuple/list or None).
    parameters=ast.literal_eval(self.get_option('parameters'))
    if parameters is None: parameters=[]
    failure=False
    for traj in trajectories:
      for runmode in self._filter_runmodes(section=traj):
        statefile=self.output(runmode=runmode,section=traj,statefile=True)
        # NOTE(review): 'io' is presumably a project-local state-file reader
        # returning (meta, states, times) — confirm the import exists.
        _,states,_=io.read(statefile)
        logging.debug("Evaluating {0}.".format(os.path.basename(statefile)))
        eps=float(self.get_option('epsilon_'+runmode+'_'+self.test,section=traj,required=True))
        value=function(*parameters)(states)
        logging.debug("Value: {0}, epsilon: {1}".format(value,eps))
        if not value<eps:
          # Keep going so all failures are logged before exiting.
          failure=True
          logging.debug("====== FAILED ======")
    if failure: sys.exit(-1)
class TrajectoryComparer(Plotter):
  """!
  @ingroup Testclasses
  Compares several trajectories to a reference trajectory by using function interpolation.
  \ref TrajectoryComparer_keys "Configuration file keys" this class understands.
  """
  ## @addtogroup TestclassKeys
  #
  # @anchor TrajectoryComparer_keys
  # ## TrajectoryComparer configuration file keys
  # * `pdf`: Save plots to this pdf file.
  # * `reference`: Section of reference trajectory
  # * `trajectories`: List of comma-separated trajectories which should be compared to the reference.
  #
  # The following configuration keys are read from the 'target'-sections.
  # * `runmodes_<test>`: For the compare test <test>, only use these runmodes.
  # * `columns_<test>`: Use these columns of the output files for the comparison.
  # * `epsilon_<runmode>_<test>`: List of acceptable deviations for the given runmode and comparison test.
  # * `postprocess_local`: Name of a global function which expects the data array as input and postprocesses the data.
  # * `format_local`: specifies which columns are floats (`f`) and which are complex numbers (`c`). Example:
  # "f+f+c+c" will result in 6 columns, the two complex number columns are split into real and imaginary parts.
  # * `start_<test>`: The first row of the data lines to consider for the comparison test `<test>`.
  # * `length_<test>`: How many lines of data to consider for the comparison test `<test>`.
  def run(self):
    """!
    Runs the test.
    """
    trajectories=self.get_option('trajectories',required=True).split(',')
    failure=False
    self.start_pdf()
    # Remember which (reference label, column index) pairs have been drawn so
    # that each reference curve is plotted only once per figure.
    reference_plotted=dict()
    for traj in trajectories:
      for runmode in self._filter_runmodes(section=traj):
        for n in range(len(self._get_columns(traj,runmode))):
          self.figureLegendRight(ylabel='value '+str(n+1), title=self.test, n=n)
          data,timeArray,data_label=self._get_data(section=traj,runmode=runmode,n=n)
          reference,reference_label=self._get_reference(section=traj,runmode=runmode,n=n)
          if (reference_label,n) not in reference_plotted :
            self.plot(timeArray,reference(timeArray),label=reference_label)
            reference_plotted[(reference_label,n)]=True
          self.plot(timeArray,data(timeArray),label=data_label)
          logging.debug("Evaluating {0}, value number {1}.".format(data_label,n+1))
          eps=self._get_eps(runmode, traj, n)
          # A failing column does not abort the run: keep going so that all
          # plots are generated, then exit with an error code at the end.
          if not self._regression(reference,data,timeArray,eps):
            logging.debug("====== FAILED ======")
            failure=True
    self.close_pdf()
    if failure:
      sys.exit(-1)
  def _get_eps(self, runmode, section, n):
    """Return the allowed deviation for column n, read from the
    comma-separated `epsilon_<runmode>_<test>` key of `section`."""
    return float(self.get_option('epsilon_'+runmode+'_'+self.test,section=section,required=True).split(',')[n])
  def _get_columns(self,section,runmode):
    """Return the data-file column indices to compare (key `columns_<test>`)."""
    return [int(s) for s in self.get_option('columns_'+self.test,section=section,required=True).split(',')]
  def _get_reference(self,section,runmode,n):
    """Return (interpolated function, plot label) of column n of the
    reference trajectory named by the `reference` key."""
    reference=self.get_option('reference',required=True)
    # The reference section is always evaluated in its first runmode.
    reference_runmode=self.runmodes(section=reference)[0]
    result=self._get_data(section=reference,runmode=reference_runmode,n=n)
    return result[0],result[2]
  def _get_data(self,section,runmode,n):
    """Load the trajectory output of `section`/`runmode` and return a tuple
    (interpolation function of column n, time array, plot label)."""
    fname=self.get_option('postprocess_local',section=section)
    format=self.get_option('format_local',section=section)
    length=self.get_option('length_'+self.test,section=section)
    start=self.get_option('start_'+self.test,section=section)
    # Optional postprocessing hook: a global function named by `postprocess_local`.
    postprocess=globals()[fname] if not fname is None else lambda x: x
    # load_sv is defined elsewhere in this file; `format` describes the
    # float/complex column layout of the output file.
    result=postprocess(load_sv(self.output(runmode=runmode,section=section),format=format))
    # Optionally restrict to a window of data rows.
    if not start is None: result=result[int(start):]
    if not length is None: result=result[:int(length)]
    timeArray = result[:,0]
    data = result[:,self._get_columns(section,runmode)[n]]
    return self._interpolate(timeArray,data),timeArray,os.path.basename(self.output(runmode,section))
  def _interpolate(self,timeArray,array):
    """Return a 1D interpolation function of `array` over `timeArray`."""
    return scipy.interpolate.interp1d(timeArray,array)
  def _regression(self, f1, f2, timeArray, eps) :
    """Return True when the integrated squared difference between f1 and f2
    over the full time interval stays below eps."""
    t0=timeArray[ 0]
    t1=timeArray[-1]
    res=quadrature(lambda t : (f1(t)-f2(t))**2,t0,t1,maxiter=1000)[0]
    logging.debug("Quadrature: {0}, epsilon: {1}".format(res,eps))
    return res<eps
def exponential(a,l):
  """Return (f, label) where f(t) = a*exp(-l*t) and label is its plot string."""
  label = "{0}*exp(-{1}*t)".format(a, l)
  def decay(t):
    return a * np.exp(-l * t)
  return decay, label
def FreeParticleX(x0,p0):
  """Return (f, label) for the free-particle position f(t) = x0 + 2*p0*t."""
  label = "{0}+2*{1}*t".format(x0, p0)
  def position(t):
    return x0 + 2 * p0 * t
  return position, label
def FreeParticleVarX(dx0,dp0):
  """Return (f, label) for the free-particle position variance
  f(t) = sqrt(dx0 + 4*dp0*t^2).

  Bug fix: the label previously lacked .format(dx0, dp0), so plots showed the
  literal placeholders "{0}"/"{1}"; it also disagreed with the computed
  formula ("(4*{1}*t)^2" instead of "4*{1}*t^2").
  """
  def fn(t):
    return (dx0+4.*dp0*t**2)**.5
  return fn, "({0}+4*{1}*t^2)^0.5".format(dx0, dp0)
class FunctionComparer(TrajectoryComparer):
"""!
@ingroup Testclasses
Compares several trajectories to a reference function by using function interpolation.
\ref FunctionComparer_keys "Configuration file keys" this class understands.
"""
## @addtogroup TestclassKeys
#
# @anchor FunctionComparer_keys
# ## FunctionComparer configuration file keys
# * `reference_function`: Name of a global function, which should return a tuple of a unary function and a label used in plots.
#
# The following configuration keys are read from the 'target'-sections.
# * `paramters_<test>`: List of tuples or single tuple which are passed to the reference function.
# Example: `[(1,5,3),(2,2,1)]` or `(1,5,3)`. If this is a list, each entry corresponds to a column of the data file,
# otherwise the same parameters are used for all columns.
def _get_reference(self, section, runmode, n):
reference = globals()[self.get_option('reference_function', required=True)]
parameters=self.get_option('parameters_'+self.test, section=section)
parameters=() if parameters is None else ast.literal_eval(parameters)
if type(parameters)==list:parameters=parameters[n]
return reference(*parameters)
def main():
  """!
  \brief Main function of the Python test driver.
  Command line options are defined here. It is responsible of loading the right `cpypyqed` module
  (release or debug) as well as instantiating and running the test class.
  """
  op = OptionParser()
  cp = configparser.ConfigParser()
  op.add_option("--test", help="the name of the test, and the name of the section in the config file")
  op.add_option("--testclass", help="the name of the testclass to use, must implement run()")
  op.add_option("--script", help="the script to run or the target to compile")
  op.add_option("--configuration", help="debug or release")
  (options,args) = op.parse_args()
  if len(args)==0: op.error("Need configuration file(s) as argument(s).")
  cp.read(args)
  sys.path.insert(0,cp.get('Setup','modulepath'))
  # we can only load the io module after we know where to look for the cpypyqed package
  global io
  configuration = (options.configuration or "").lower()
  if configuration=="release":
    import cpypyqed as io
  elif configuration=="debug":
    import cpypyqed_d as io
  else:
    # Robustness fix: a missing or unknown --configuration previously crashed
    # with AttributeError/NameError on `io`; fail with a clear message instead.
    op.error("--configuration must be either 'release' or 'debug'")
  logging.info("Taking cpypyqed from {0}".format(io.__file__))
  if options.testclass:
    constructor = globals()[options.testclass]
    myTestclass = constructor(options,cp)
    myTestclass.run()
if __name__ == '__main__':
  main()
| [
"matplotlib.pyplot.title",
"os.remove",
"numpy.abs",
"optparse.OptionParser",
"os.environ.copy",
"numpy.allclose",
"subprocess.list2cmdline",
"cpypyqed_d.read",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.exp",
"numpy.diag",
"os.path.join",
"matplotlib.font_manager.FontProperties",
... | [((934, 1027), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)s %(message)s"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s %(levelname)s %(message)s')\n", (953, 1027), False, 'import logging\n'), ((723, 744), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (737, 744), False, 'import matplotlib\n'), ((1024, 1068), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib.font_manager"""'], {}), "('matplotlib.font_manager')\n", (1041, 1068), False, 'import logging\n'), ((1334, 1366), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (1345, 1366), False, 'import os\n'), ((2279, 2308), 'numpy.zeros', 'np.zeros', (['(input.shape[0], 6)'], {}), '((input.shape[0], 6))\n', (2287, 2308), True, 'import numpy as np\n'), ((2519, 2548), 'numpy.zeros', 'np.zeros', (['(input.shape[0], 5)'], {}), '((input.shape[0], 5))\n', (2527, 2548), True, 'import numpy as np\n'), ((30656, 30670), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (30668, 30670), False, 'from optparse import OptionParser\n'), ((30678, 30705), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (30703, 30705), False, 'import configparser\n'), ((1564, 1583), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (1573, 1583), False, 'import os\n'), ((1938, 1958), 'numpy.genfromtxt', 'np.genfromtxt', (['fname'], {}), '(fname)\n', (1951, 1958), True, 'import numpy as np\n'), ((12173, 12190), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (12188, 12190), False, 'import os\n'), ((15458, 15471), 'cpypyqed_d.read', 'io.read', (['this'], {}), '(this)\n', (15465, 15471), True, 'import cpypyqed_d as io\n'), ((15495, 15509), 'cpypyqed_d.read', 'io.read', (['other'], {}), '(other)\n', (15502, 15509), True, 'import cpypyqed_d as io\n'), ((15704, 15717), 'cpypyqed_d.read', 'io.read', (['this'], {}), '(this)\n', (15711, 15717), True, 
'import cpypyqed_d as io\n'), ((15739, 15753), 'cpypyqed_d.read', 'io.read', (['other'], {}), '(other)\n', (15746, 15753), True, 'import cpypyqed_d as io\n'), ((19855, 19956), 'subprocess.Popen', 'subprocess.Popen', (['(command + [self.options.script])'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command + [self.options.script], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n', (19871, 19956), False, 'import subprocess\n'), ((21134, 21151), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (21149, 21151), True, 'import matplotlib.pyplot as plt\n'), ((21401, 21412), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (21410, 21412), True, 'import matplotlib.pyplot as plt\n'), ((21765, 21803), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'n', 'figsize': '(11.6, 8.2)'}), '(num=n, figsize=(11.6, 8.2))\n', (21775, 21803), True, 'import matplotlib.pyplot as plt\n'), ((21845, 21861), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (21854, 21861), True, 'import matplotlib.pyplot as plt\n'), ((21866, 21884), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (21876, 21884), True, 'import matplotlib.pyplot as plt\n'), ((21889, 21904), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (21899, 21904), True, 'import matplotlib.pyplot as plt\n'), ((21976, 21992), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (21990, 21992), False, 'from matplotlib.font_manager import FontProperties\n'), ((22029, 22103), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.01, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)', 'prop': 'fontP'}), '(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.0, prop=fontP)\n', (22039, 22103), True, 'import matplotlib.pyplot as plt\n'), ((22135, 22166), 'matplotlib.pyplot.setp', 'plt.setp', (['llines'], {'linewidth': '(1.5)'}), '(llines, linewidth=1.5)\n', (22143, 22166), True, 'import 
matplotlib.pyplot as plt\n'), ((22442, 22472), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'data'], {}), '(time, data, **kwargs)\n', (22450, 22472), True, 'import matplotlib.pyplot as plt\n'), ((22546, 22584), 'numpy.arange', 'np.arange', (['state.shape[0]'], {'dtype': 'float'}), '(state.shape[0], dtype=float)\n', (22555, 22584), True, 'import numpy as np\n'), ((22601, 22641), 'numpy.diag', 'np.diag', (['(nTh ** n / (1.0 + 4) ** (n + 1))'], {}), '(nTh ** n / (1.0 + 4) ** (n + 1))\n', (22608, 22641), True, 'import numpy as np\n'), ((4268, 4294), 'sys.exit', 'sys.exit', (['"""--test missing"""'], {}), "('--test missing')\n", (4276, 4294), False, 'import sys\n'), ((7902, 7939), 'os.path.join', 'os.path.join', (['self.outputdir', 'section'], {}), '(self.outputdir, section)\n', (7914, 7939), False, 'import os\n'), ((7965, 8018), 'os.path.join', 'os.path.join', (['self.outputdir', "(section + '_' + runmode)"], {}), "(self.outputdir, section + '_' + runmode)\n", (7977, 8018), False, 'import os\n'), ((9447, 9488), 'subprocess.call', 'subprocess.call', (['command', '*args'], {}), '(command, *args, **kwargs)\n', (9462, 9488), False, 'import subprocess\n'), ((19532, 19618), 'subprocess.Popen', 'subprocess.Popen', (['(command + [dep])'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command + [dep], stdout=subprocess.PIPE, stderr=subprocess\n .PIPE)\n', (19548, 19618), False, 'import subprocess\n'), ((19791, 19847), 'subprocess.list2cmdline', 'subprocess.list2cmdline', (['(command + [self.options.script])'], {}), '(command + [self.options.script])\n', (19814, 19847), False, 'import subprocess\n'), ((21159, 21176), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'n'}), '(num=n)\n', (21169, 21176), True, 'import matplotlib.pyplot as plt\n'), ((21701, 21718), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (21716, 21718), True, 'import matplotlib.pyplot as plt\n'), ((21726, 21743), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'num': 'n'}), '(num=n)\n', (21736, 21743), True, 'import matplotlib.pyplot as plt\n'), ((24533, 24545), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (24541, 24545), False, 'import sys\n'), ((27164, 27176), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (27172, 27176), False, 'import sys\n'), ((28899, 28913), 'numpy.exp', 'np.exp', (['(-l * t)'], {}), '(-l * t)\n', (28905, 28913), True, 'import numpy as np\n'), ((30282, 30310), 'ast.literal_eval', 'ast.literal_eval', (['parameters'], {}), '(parameters)\n', (30298, 30310), False, 'import ast\n'), ((9401, 9433), 'subprocess.list2cmdline', 'subprocess.list2cmdline', (['command'], {}), '(command)\n', (9424, 9433), False, 'import subprocess\n'), ((9510, 9523), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (9518, 9523), False, 'import sys\n'), ((15522, 15551), 'numpy.allclose', 'np.allclose', (['r_state', 'e_state'], {}), '(r_state, e_state)\n', (15533, 15551), True, 'import numpy as np\n'), ((15555, 15582), 'numpy.allclose', 'np.allclose', (['r_time', 'e_time'], {}), '(r_time, e_time)\n', (15566, 15582), True, 'import numpy as np\n'), ((15766, 15803), 'numpy.allclose', 'np.allclose', (['r_state[-1]', 'e_state[-1]'], {}), '(r_state[-1], e_state[-1])\n', (15777, 15803), True, 'import numpy as np\n'), ((15807, 15842), 'numpy.allclose', 'np.allclose', (['r_time[-1]', 'e_time[-1]'], {}), '(r_time[-1], e_time[-1])\n', (15818, 15842), True, 'import numpy as np\n'), ((19482, 19522), 'subprocess.list2cmdline', 'subprocess.list2cmdline', (['(command + [dep])'], {}), '(command + [dep])\n', (19505, 19522), False, 'import subprocess\n'), ((20180, 20245), 'sys.exit', 'sys.exit', (['"""Compilation was successful, but failure was expected."""'], {}), "('Compilation was successful, but failure was expected.')\n", (20188, 20245), False, 'import sys\n'), ((20306, 20324), 'logging.debug', 'logging.debug', (['std'], {}), '(std)\n', (20319, 20324), False, 'import logging\n'), ((20333, 20351), 'logging.debug', 'logging.debug', 
(['err'], {}), '(err)\n', (20346, 20351), False, 'import logging\n'), ((24116, 24134), 'cpypyqed_d.read', 'io.read', (['statefile'], {}), '(statefile)\n', (24123, 24134), True, 'import cpypyqed_d as io\n'), ((12296, 12344), 'os.path.join', 'os.path.join', (['cpypyqed_builddir', '"""cppqedmodules"""'], {}), "(cpypyqed_builddir, 'cppqedmodules')\n", (12308, 12344), False, 'import os\n'), ((22657, 22685), 'numpy.abs', 'np.abs', (['(state - expected_rho)'], {}), '(state - expected_rho)\n', (22663, 22685), True, 'import numpy as np\n'), ((24479, 24516), 'logging.debug', 'logging.debug', (['"""====== FAILED ======"""'], {}), "('====== FAILED ======')\n", (24492, 24516), False, 'import logging\n'), ((24182, 24209), 'os.path.basename', 'os.path.basename', (['statefile'], {}), '(statefile)\n', (24198, 24209), False, 'import os\n'), ((27058, 27095), 'logging.debug', 'logging.debug', (['"""====== FAILED ======"""'], {}), "('====== FAILED ======')\n", (27071, 27095), False, 'import logging\n')] |
import os
import numpy as np
from collections import Counter
from typing import List, Tuple
DIRNAME = os.path.dirname(__file__)
class Board:
    """One bingo board: a number grid plus a parallel marking grid."""
    def __init__(self, grid:List[List[int]]):
        self.size = len(grid)
        self.numbers = np.array(grid)
        # 0 = unmarked, 1 = marked; same shape as the number grid.
        self.crossed = np.zeros((self.size, self.size), dtype=int)
    def check_for_number(self, number:int) -> bool:
        """Mark the first cell equal to `number`; return whether one was found."""
        for r in range(self.size):
            for c in range(self.size):
                if self.numbers[r][c] == number:
                    self.crossed[r][c] = 1
                    return True
        return False
    def check_if_winner(self) -> bool:
        """Return True when any complete row or column is marked."""
        col_totals = self.crossed.sum(axis=0)
        row_totals = self.crossed.sum(axis=1)
        return bool((col_totals == self.size).any() or (row_totals == self.size).any())
    def calculate_score(self, number) -> int:
        """Score = sum of all unmarked numbers times the last drawn number."""
        unmarked = (1 - self.crossed) * self.numbers
        return int(unmarked.sum() * number)
    def show_board(self) -> None:
        """Print the marked numbers (unmarked cells show as 0)."""
        print(self.crossed * self.numbers)
class Game:
    """A bingo game: the draw order plus all boards parsed from an input file."""
    def __init__(self, path:str):
        # Parse the draw sequence and all boards up front.
        self.numbers, self.boards = self.read_file(path)
    def read_file(self, path:str) -> Tuple[List[int], List[Board]]:
        """Parse a bingo input file (path relative to this script's directory).

        The first line holds the comma-separated draw numbers; the remaining
        blank-line-separated groups of rows are the boards.
        Returns (draw numbers, list of Board objects).
        """
        relative_path = os.path.join(DIRNAME, path)
        with open(relative_path) as file:
            numbers = [int(num) for num in file.readline().strip().split(',')]
            boards = []
            current_board = []
            for line in file.readlines():
                if line == '\n':
                    # A blank line terminates the current board (if any).
                    if current_board != []:
                        boards.append(Board(current_board))
                        current_board = []
                else:
                    current_board.append([int(char.strip()) for char in line.split(' ') if char!=''])
            # The file does not end with a blank line, so flush the last board.
            boards.append(Board(current_board))
        return numbers, boards
    def find_winning_score(self, find_loser:bool=False) -> int:
        """Play the draw sequence and return a winning score.

        find_loser=False: score of the first board to win.
        find_loser=True: score of the board that wins last.
        """
        boards_in_play = list(range(len(self.boards)))
        for number in self.numbers:
            for board_num, board in enumerate(self.boards):
                found = board.check_for_number(number)
                if found:
                    winner = board.check_if_winner()
                    if winner:
                        if find_loser:
                            # Retire each board on its first win; the answer is
                            # the score of the last board to empty the list.
                            if board_num in boards_in_play:
                                boards_in_play.remove(board_num)
                            if len(boards_in_play) == 0:
                                return board.calculate_score(number)
                        else:
                            return board.calculate_score(number)
if __name__ == '__main__':
    example = Game('example.txt')
    actual = Game('input.txt')

    # part 1
    assert example.find_winning_score() == 4512
    print(f'Part 1 solution: {actual.find_winning_score()}')

    # part 2 (bug fix: this line previously printed "Part 1 solution")
    assert example.find_winning_score(find_loser=True) == 1924
    print(f'Part 2 solution: {actual.find_winning_score(find_loser=True)}')
"os.path.dirname",
"numpy.array",
"os.path.join"
] | [((103, 128), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (118, 128), False, 'import os\n'), ((243, 257), 'numpy.array', 'np.array', (['grid'], {}), '(grid)\n', (251, 257), True, 'import numpy as np\n'), ((1259, 1286), 'os.path.join', 'os.path.join', (['DIRNAME', 'path'], {}), '(DIRNAME, path)\n', (1271, 1286), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 18:19:09 2020
@author: RobertTeresi
"""
import os
import io
import sys
import re
from subprocess import call
from functools import partial
from csv import reader, writer
from pathlib import Path
import shutil # Copy files to another directory
#from multiprocessing import Pool
from pathos.multiprocessing import ProcessingPool as PathosPool # Pool in class
import numpy as np
import pandas as pd
from datetime import datetime
import gzip
## TO DO: Get Suffix Field out of .xml
from pubmed_parser import medline_parser
class pubmed_processor:
    """Parse MEDLINE xml.gz archives into clean, gzipped csv files for the
    downstream C++ author-disambiguation step."""
    def __init__(self, working_directory, title_stop_path,
                 affil_stop_path, mesh_stop_path, rm_stopwords=True,
                 affiliation_correction = True):
        """Store paths/options and compile the C++ affiliation helper.

        working_directory: project root directory.
        title_stop_path / affil_stop_path / mesh_stop_path: stopword files.
        rm_stopwords: default stopword-removal behaviour.
        affiliation_correction: not stored here; only consumed by
            process_data() via start() -- TODO confirm this is intentional.
        """
        # Initialize all basic variables to run functions
        self.working_directory = working_directory
        #("/Users/RobertTeresi/Documents/GitHub/"
        # "Pubmed_Medline_Author_Disambiguation-orig/"
        #"Pubmed_Medline_Author_Disambiguation")
        self.rm_stopwords = rm_stopwords
        (self.affil_stop_path,
         self.mesh_stop_path,
         self.title_stop_path) = (affil_stop_path,mesh_stop_path,
                                  title_stop_path)
        # Compile and import C++ function
        # NOTE(review): this chdir()s into "C++" and never restores the old
        # cwd, so later relative paths depend on this side effect -- confirm.
        if os.getcwd()[-3:] != "C++":
            os.chdir("C++")
        call(["chmod", "755", "affilbind_compile.sh"]) # Make executable
        call("./affilbind_compile.sh") # Execute compilation
def process_data(self, filepath, filename, respath, rm_stopwords=True,
stop_paths=None, affiliation_correction=True):
"""
Take zipped xml from medline and transform it into a clean, gzipped csv.
rm_stopwords set true to remove appropriate stopwords from title,
affiliation, and mesh fields.
stop_paths = List or tuple of paths to title, affiliation, and mesh
stopwords in that order. Default is none - take paths from
main function, where the paths are defined globally
"""
def clean_string(string, stopword_list = None,
rm_stopwords = True, mesh = False):
"""Convert the string to lower and take out stopwords."""
# If we don't supply a stopword list, but don't have remove stopwords set to False, we raise an error
if stopword_list is None and rm_stopwords:
raise Exception('Please Supply a Stopword List or set rm_stopwords'
'to False')
# Mesh listings need a slightly different treatment
if mesh:
# Remove numerical MeSH Codes
string = [ re.sub(r'([ ]?D\d{6}\:)','',x) for x in string]
string = [x.lower() for x in string]
if rm_stopwords:
# Remove stop words
string = [x for x in string if x not in stopword_list]
# Convert to '/' separated string
string = '/'.join(string)
string = re.sub(r',','',string)
#string = re.sub(r'([ ]?d\d{6}\:' + r'|[ ]?d\d{6}\:'.join(stopword_list) + r')', '', string)
# Now I only want to keep the unique ID
#string = re.sub(r'[ ]?(d\d{6})(\:[a-z\-\'\.\, ]+)', r'\1', string)
return(string)
else:
if type(string) is list:
string = string = ('/'.join(string)).lower()
# First, convert the string to lower
string = string.lower()
string = re.sub(r'\W+|\b[a-z]\b', ' ', string)
# Remove stopwords if the option is set to true
if rm_stopwords:
string = re.sub(r'\b(' + r'| '.join(stopword_list) +
r')\b\s*', ' ', string)
# Trim whitespaces (induced by cleaning or otherwise)
string = string.strip() # From beginning and end
string = re.sub(r'[ ]+'," ",string) # From the middle
return(string.split('/'))
def extract_email_from_affiliations(affiliation):
"""Extract an author's email from their list of affiliations."""
try:
emailregex = r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+'
return(re.findall(emailregex, affiliation)[0])
except:
return('')
def read_stoplist(stoplistpath, sep = ", "):
"""
Parse our list of stopwords.
stoplistpath = a path to a .txt file with stopwords
sep = Separator of words in list (separated by ", " by default)
"""
with open(stoplistpath, 'r') as file:
data = file.read().split(sep)
return(data)
def copy_remove(author, coauthors):
"""Remove author from list of coauthors."""
lcopy = list(coauthors)
lcopy.remove(author)
return(lcopy)
def add_to_affil_dict(lname, affilwordvec, d):
for affilword in affilwordvec:
if affilword == '':
continue
elif d[lname][1].get(affilword):
d[lname][1][affilword] += 1
else:
d[lname][1][affilword] = 1
d[lname][0] += 1
def explode(df, lst_cols, fill_value=''):
# make sure `lst_cols` is a list
if lst_cols and not isinstance(lst_cols, list):
lst_cols = [lst_cols]
# all columns except `lst_cols`
idx_cols = df.columns.difference(lst_cols)
# calculate lengths of lists
lens = df[lst_cols[0]].str.len()
if (lens > 0).all():
# ALL lists in cells aren't empty
return pd.DataFrame({
col:np.repeat(df[col].values, df[lst_cols[0]].str.len())
for col in idx_cols
}).assign(**{col:np.concatenate(df[col].values) for col in lst_cols}) \
.loc[:, df.columns]
else:
# at least one list in cells is empty
return pd.DataFrame({
col:np.repeat(df[col].values, df[lst_cols[0]].str.len())
for col in idx_cols
}).assign(**{col:np.concatenate(df[col].values) for col in lst_cols}) \
.append(df.loc[lens==0, idx_cols]).fillna(fill_value) \
.loc[:, df.columns]
print(filename + " - start")
begin = datetime.now()
print(filename + " - load data")
df = medline_parser.parse_medline_xml(filepath)
df = pd.DataFrame(df)
print(filename + " - save to " + respath)
# Get rid of unneeded columns -- can maybe work this into the parser later
df = df.drop(['publication_types', 'chemical_list',
'keywords', 'doi', 'references',
'delete','pmc','other_id','medline_ta',
'nlm_unique_id','issn_linking'],
axis=1)
# Sometimes authors and affiliations not read in as list
if type(df.loc[2,'authors']) is not list:
df['authors'] = df['authors'].apply(lambda x:x.split(";"))
df['affiliations'] = df['affiliations'].apply(lambda x:x.split("&&"))
# Now create coauthors column (Same as authors column for now)
df['coauthors'] = df['authors']
# Explode Name and Affiliation cols.
# Key cols now author-article, not article.
df = explode(df,['authors','affiliations'])
# Rename authors to author (the author in this observation)
df.rename(columns={'authors': 'author'}, inplace=True)
# Now remove author from coauthor list.
# Careful not to remove other authors with same name. Use list.remove()
#
# Explode apparently makes variables in slots that used to be in the same
# row references of the same object.
# (i.e. the list coauthors for every row in an article are references to
# the same variable)
#
# Therefore we have to make a copy of each list and then remove the author
# from the row of coauthors.
#
# Finally, convert the list to a '/' joined string
df['coauthors'] = ['/'.join(copy_remove(author, coauthors))
for author, coauthors
in zip(df['author'],df['coauthors'])]
# Extract emails
df['email'] = [extract_email_from_affiliations(affiliation)
for affiliation in df['affiliations']]
# Replace missing values (mesh, affiliation, or title) with ''
nacols = ['mesh_terms','affiliations','title', 'email']
df[nacols] = df[nacols].fillna('')
# Split into firstname lastname
df = df[df['author'] != ''] # Some entries have no author! (Can't use)
df = df.reset_index() # reset index
df = df.drop('index', axis = 1) # get rid of old index column
splitnames = df.author.str.split(expand=False)
df['Lastname'] = [x[-1] if len(x) >= 1 else '' for x in splitnames]
df['Firstname'] = [' '.join(x[:-1]) if len(x) >= 2 else ''
for x in splitnames]
df = df[df['Lastname'] != '']
del splitnames
# Clean strings
# Take out stopwords, non-alphanumeric chars, and single-character words
if rm_stopwords:
if stop_paths is None:
df['title'] = [clean_string(title,
read_stoplist(self.title_stop_path))
if title != '' else ''
for title in df['title']]
df['affiliations'] = [clean_string(affiliation,
read_stoplist(self.affil_stop_path))
if affiliation != '' else ''
for affiliation in df['affiliations']]
df['mesh_terms'] = [clean_string(mesh_term,
read_stoplist(self.mesh_stop_path),
mesh = True)
if mesh_term != '' else ''
for mesh_term in df['mesh_terms']]
elif len(stop_paths) == 3:
try:
df['title'] = [clean_string(title,
read_stoplist(stop_paths[0]))
for title in df['title']]
df['affiliations'] = [clean_string(affiliation,
read_stoplist(stop_paths[1]))
for affiliation in df['affiliations']]
df['mesh_terms'] = [clean_string(mesh_term,
read_stoplist(stop_paths[2]),
mesh = True)
for mesh_term in df['mesh_terms']]
except Exception:
raise Exception("Error while trying to read stoplists not"
" defined in main() function.")
else:
df['title'] = [clean_string(title,rm_stopwords = False)
for title in df['title']]
df['affiliations'] = [clean_string(affiliation,rm_stopwords = False)
for affiliation in df['affiliations']]
df['mesh_terms'] = [clean_string(mesh_term,rm_stopwords = False)
for mesh_term in df['mesh_terms']]
# Now separate author name into first, middle, and last
'''
if affiliation_correction:
# First make dictionary structure
# (dict<Lastnamem,pair<int, dict<Affil,count>>>)
affildict = {lname : [0, dict()] for lname in df['Lastname']}
# Dict is mutable + passed by reference
# updates without having to return
list(map(partial(add_to_affil_dict, d=affildict),
df['Lastname'], df['affiliations']))
'''
# Affiliations and MeSH back into format they will be passed to diambig in
df['affiliations'] = ['/'.join(affil) for affil in df['affiliations']]
# Change column names to be consistent with C++ script
# Middlename column will be same as first name
df['Middlename'] = df['Firstname']
# Affiliation Exists column
df['Affiliation_Exists'] = df['affiliations']\
.apply(lambda x: 0 if x == '' else 1)
df.rename(columns={'title':'Title',
'email':'Email',
'language':'Language',
'affiliations':'Affiliations',
'country':'Country',
'journal':'Journal',
'pmid':'PMID',
'mesh_terms':'MeSH',
'coauthors':'Coauthor'},
inplace=True)
# Drop author column
df = df.drop('author', axis = 1)
# Change title column from a list to a string
df['Title'] = [' '.join(x) for x in df['Title']]
df['MeSH'] = [' '.join(x) for x in df['MeSH']]
df['Affiliation_Exists'] = df['Affiliation_Exists'].astype(str)
# Remove all commas from dataframe (messes up read in C++)
df = df.applymap(lambda x: re.sub(r',','',x))
df['Affiliation_Exists'] = df['Affiliation_Exists'].astype(int)
df = df[['PMID', 'Lastname','Firstname','Middlename', 'Title',
'Journal', 'pubdate','MeSH','Language',
'Affiliations', 'Affiliation_Exists', 'Coauthor','Email']]
# Save to a compressed csv in the results directory
df.to_csv(respath, compression = "gzip", sep=",", index = False)
end = datetime.now() - begin
print(filename + " - done - " + str(np.round(end.seconds / 60)) + "min")
# If we are correcting affiliations, return the dictionary.
# The pool function calling it will return with a list of all dictionaries.
return(affildict)
def start(self, text_data_dir, res_dir, nprocs=8):
'''
entry function
text_data_dir: folder of raw data
text_res_dir: folder of output
verbose: int. Information is printed every N records
nprocs: number of cores in parallel
'''
p = PathosPool(nprocs)
filepathsvec,filenamesvec, respaths = list(), list(), list()
for dirpath, _, filenames in os.walk(text_data_dir):
for filename in filenames[16:40]:
if (("gz" in filename) and ('md5' not in filename )
and ('copy' not in filename)):
filepath = os.path.join(dirpath, filename)
print(filepath)
res_name = filename.split(".")[0] + ".csv.gz"
respath = os.path.join(res_dir, res_name)
#if os.path.exists(respath):
# pass
#else:
if True:
filepathsvec.append(filepath)
filenamesvec.append(filename)
respaths.append(respath)
#p.apply_async(process_data, args = (filepath,filename,
# respath, True,
# [title_stop_path,
# affil_stop_path,
# mesh_stop_path]))
self.affildicts = p.amap(partial(self.process_data,
stop_paths = [self.title_stop_path,
self.affil_stop_path,
self.mesh_stop_path],
rm_stopwords=True,
affiliation_correction = True),
filepathsvec, filenamesvec, respaths)
p.close()
p.join() # Having an issue joining
print("joined")
p.clear() # Delete the pool
# Now send the affildicts away to C++
#print("Sending affiliations to C++ function")
#import affilbind
#affilbind.affil_stopword_find(self.affildicts.get(),
# (self.working_directory +
# "/Results/affiliation_stopword_list.txt")
# )
#print("Complete")
| [
"pandas.DataFrame",
"functools.partial",
"os.path.join",
"numpy.concatenate",
"os.getcwd",
"os.walk",
"datetime.datetime.now",
"re.findall",
"subprocess.call",
"pubmed_parser.medline_parser.parse_medline_xml",
"numpy.round",
"pathos.multiprocessing.ProcessingPool",
"os.chdir",
"re.sub"
] | [((7000, 7014), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7012, 7014), False, 'from datetime import datetime\n'), ((7069, 7111), 'pubmed_parser.medline_parser.parse_medline_xml', 'medline_parser.parse_medline_xml', (['filepath'], {}), '(filepath)\n', (7101, 7111), False, 'from pubmed_parser import medline_parser\n'), ((7125, 7141), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (7137, 7141), True, 'import pandas as pd\n'), ((15375, 15393), 'pathos.multiprocessing.ProcessingPool', 'PathosPool', (['nprocs'], {}), '(nprocs)\n', (15385, 15393), True, 'from pathos.multiprocessing import ProcessingPool as PathosPool\n'), ((15502, 15524), 'os.walk', 'os.walk', (['text_data_dir'], {}), '(text_data_dir)\n', (15509, 15524), False, 'import os\n'), ((1416, 1431), 'os.chdir', 'os.chdir', (['"""C++"""'], {}), "('C++')\n", (1424, 1431), False, 'import os\n'), ((1444, 1490), 'subprocess.call', 'call', (["['chmod', '755', 'affilbind_compile.sh']"], {}), "(['chmod', '755', 'affilbind_compile.sh'])\n", (1448, 1490), False, 'from subprocess import call\n'), ((1521, 1551), 'subprocess.call', 'call', (['"""./affilbind_compile.sh"""'], {}), "('./affilbind_compile.sh')\n", (1525, 1551), False, 'from subprocess import call\n'), ((14770, 14784), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14782, 14784), False, 'from datetime import datetime\n'), ((16641, 16802), 'functools.partial', 'partial', (['self.process_data'], {'stop_paths': '[self.title_stop_path, self.affil_stop_path, self.mesh_stop_path]', 'rm_stopwords': '(True)', 'affiliation_correction': '(True)'}), '(self.process_data, stop_paths=[self.title_stop_path, self.\n affil_stop_path, self.mesh_stop_path], rm_stopwords=True,\n affiliation_correction=True)\n', (16648, 16802), False, 'from functools import partial\n'), ((1377, 1388), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1386, 1388), False, 'import os\n'), ((3241, 3264), 're.sub', 're.sub', (['""","""', '""""""', 'string'], {}), 
"(',', '', string)\n", (3247, 3264), False, 'import re\n'), ((3788, 3827), 're.sub', 're.sub', (['"""\\\\W+|\\\\b[a-z]\\\\b"""', '""" """', 'string'], {}), "('\\\\W+|\\\\b[a-z]\\\\b', ' ', string)\n", (3794, 3827), False, 'import re\n'), ((4301, 4328), 're.sub', 're.sub', (['"""[ ]+"""', '""" """', 'string'], {}), "('[ ]+', ' ', string)\n", (4307, 4328), False, 'import re\n'), ((14301, 14319), 're.sub', 're.sub', (['""","""', '""""""', 'x'], {}), "(',', '', x)\n", (14307, 14319), False, 'import re\n'), ((2832, 2865), 're.sub', 're.sub', (['"""([ ]?D\\\\d{6}\\\\:)"""', '""""""', 'x'], {}), "('([ ]?D\\\\d{6}\\\\:)', '', x)\n", (2838, 2865), False, 'import re\n'), ((4667, 4702), 're.findall', 're.findall', (['emailregex', 'affiliation'], {}), '(emailregex, affiliation)\n', (4677, 4702), False, 'import re\n'), ((15726, 15757), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (15738, 15757), False, 'import os\n'), ((15890, 15921), 'os.path.join', 'os.path.join', (['res_dir', 'res_name'], {}), '(res_dir, res_name)\n', (15902, 15921), False, 'import os\n'), ((14837, 14863), 'numpy.round', 'np.round', (['(end.seconds / 60)'], {}), '(end.seconds / 60)\n', (14845, 14863), True, 'import numpy as np\n'), ((6418, 6448), 'numpy.concatenate', 'np.concatenate', (['df[col].values'], {}), '(df[col].values)\n', (6432, 6448), True, 'import numpy as np\n'), ((6771, 6801), 'numpy.concatenate', 'np.concatenate', (['df[col].values'], {}), '(df[col].values)\n', (6785, 6801), True, 'import numpy as np\n')] |
# Written by i3s
import os
import numpy as np
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
import seaborn as sns
import time
from sklearn.model_selection import KFold
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.cross_decomposition import PLSRegression
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
def proj_l1ball(y, eta):
    """
    Project a vector onto the l1-ball of radius eta.

    The input is always flattened to shape (m,) with m = y.size before the
    projection, so multi-dimensional inputs lose their original shape.
    Returns the projected 1-D numpy array.
    """
    if type(y) is not np.ndarray:
        y = np.array(y)
    if y.ndim > 1:
        y = np.reshape(y, (-1,))
    m = y.shape[0]
    abs_y = np.absolute(y)
    # Magnitudes sorted in descending order, and their running sums.
    running = np.cumsum(np.sort(abs_y, axis=0)[::-1], axis=0)
    # Soft-threshold level: the largest candidate value, floored at zero
    # (zero means y is already inside the ball and is returned unchanged).
    theta = np.amax([np.amax((running - eta) / (np.arange(m) + 1)), 0])
    return np.sign(y) * np.maximum(abs_y - theta, 0)
def centroids(XW, Y, k):
    """
    Compute the k class centers of the projected data XW.

    Labels in Y follow the Matlab convention 1..k, hence the (cls + 1)
    shift below. Returns a (k, d) array whose row i is the mean of the
    rows of XW labelled i + 1.
    """
    labels = np.reshape(Y, -1)
    n_features = XW.shape[1]
    centers = np.zeros((k, n_features))
    for cls in range(k):
        members = XW[labels == (cls + 1), :]
        centers[cls, :] = np.mean(members, axis=0)
    return centers
def class2indicator(y, k):
    """
    Convert a label vector y with values in 1..k into an (n, k) 0/1
    indicator matrix.
    """
    if len(y.shape) > 1:
        # np.reshape also accepts list-like inputs, unlike ndarray.flatten(),
        # so it is used here on purpose.
        y = np.reshape(y, -1)
    n = len(y)
    indicator = np.zeros((n, k))  # dtype=float by default
    # Labels follow the Matlab 1-based convention, hence the +1 shift.
    for cls in range(k):
        indicator[:, cls] = y == (cls + 1)
    return indicator
def nb_Genes(w):
    """
    Count the selected genes of a weight matrix w: the rows whose l2 norm
    is strictly positive.

    Returns (nbG, indGene_w): the number of selected rows and their indices.
    """
    n_rows = w.shape[0]
    selected = np.zeros((n_rows, 1))
    for row in range(n_rows):
        if np.linalg.norm(w[row, :]) > 0:
            selected[row] = 1
    indGene_w = np.where(selected == 1)[0]
    return int(np.sum(selected)), indGene_w
def select_feature_w(w, featurenames):
    """Extract, per class, the features selected by the weight matrix w.

    For each of the k columns of w, features are ranked by the absolute
    value of their weight (descending) and kept down to the last nonzero
    entry of the sorted column.

    Parameters
    ----------
    w : ndarray of shape (d, k)
        Weight matrix, one column per class.
    featurenames : sequence of length d
        Names of the d features (rows of w).

    Returns
    -------
    features : ndarray of shape (max_subset_len, k)
        Selected feature names per class; shorter columns are padded at the
        bottom via ndarray.resize (zero-fill).
    normW : ndarray
        The corresponding signed weights, padded the same way.
    """
    k = w.shape[1]
    d = w.shape[0]
    lst_features = []
    lst_norm = []
    for i in range(k):
        s_tmp = w[:, i]  # the i-th column
        f_tmp = np.abs(s_tmp)  # the absolute values of this column
        ind = np.argsort(f_tmp)[
            ::-1
        ]  # the indices of the sorted abs column (descending order)
        f_tmp = np.sort(f_tmp)[::-1]  # the sorted abs column (descending order)
        nonzero_inds = np.nonzero(f_tmp)[0]  # the nonzero indices
        lst_f = []
        lst_n = []
        if len(nonzero_inds) > 0:
            nozero_ind = nonzero_inds[-1]  # choose the last nonzero index
            if nozero_ind == 0:
                # Single selected feature for this class.
                lst_f.append(featurenames[ind[0]])
                lst_n.append(s_tmp[ind[0]])
            else:
                for j in range(nozero_ind + 1):
                    lst_f.append(featurenames[ind[j]])
                # NOTE: lst_n is replaced by an ndarray slice here (not
                # appended to), unlike the single-feature branch above.
                lst_n = s_tmp[ind[0 : (nozero_ind + 1)]]
        lst_features.append(lst_f)
        lst_norm.append(lst_n)
    n_cols_f = len(lst_features)
    n_rows_f = max(map(len, lst_features))  # maximum subset length
    n_cols_n = len(lst_norm)
    n_rows_n = max(map(len, lst_norm))
    # Pad each per-class list to the maximum length and stack them as rows;
    # the final transpose puts one class per column.
    for i in range(n_cols_f):
        ft = np.array(lst_features[i])
        ft.resize(n_rows_f, refcheck=False)
        nt = np.array(lst_norm[i])
        nt.resize(n_rows_n, refcheck=False)
        if i == 0:
            features = ft
            normW = nt
            continue
        features = np.vstack((features, ft))
        normW = np.vstack((normW, nt))
    features = features.T
    normW = normW.T
    return features, normW
def compute_accuracy(idxR, idx, k):
    """
    # ===============================
    #----- INPUT
    #  idxR : real labels
    #  idx  : estimated labels
    #  k    : number of class
    #----- OUTPUT
    # ACC_glob : global accuracy
    # tab_acc  : accuracy per class (shape (1, k))
    # ===============================
    """
    # BUGFIX: the previous checks were `type(x) is not np.array`, which is
    # always True because np.array is a factory *function*, not a type, so
    # every call silently copied its inputs. isinstance against np.ndarray
    # is the intended check (and avoids the useless copy).
    if not isinstance(idxR, np.ndarray):
        idxR = np.array(idxR)
    if not isinstance(idx, np.ndarray):
        idx = np.array(idx)
    # Normalize shapes so that the element-wise comparison below aligns.
    if idxR.ndim == 2 and 1 not in idxR.shape:
        idxR = np.reshape(idxR, (-1, 1))
    if idx.ndim == 1:
        idx = np.reshape(idx, idxR.shape)
    # Global accuracy
    y = np.sum(idxR == idx)
    ACC_glob = y / len(idxR)
    # Accuracy per class; labels are 1-based (Matlab convention), hence j + 1.
    tab_acc = np.zeros((1, k))
    for j in range(k):
        ind = np.where(idxR == (j + 1))[0]
        if len(ind) == 0:
            # No sample of this class in the ground truth.
            tab_acc[0, j] = 0.0
        else:
            tab_acc[0, j] = int(np.sum(idx[ind] == (j + 1))) / len(ind)
    return ACC_glob, tab_acc
def predict_L1(Xtest, W, mu):
    """
    Nearest-center classification in the projected space.

    Each row of Xtest is projected by W and assigned to the class whose
    center mu[j] is closest in l1 distance. Returned labels are 1-based,
    shaped (m, 1).
    """
    n_class = mu.shape[0]
    n_samples = Xtest.shape[0]
    Ytest = np.zeros((n_samples, 1))
    for row in range(n_samples):
        projected = np.matmul(Xtest[row, :], W)
        dists = np.zeros((1, n_class))
        for j in range(n_class):
            dists[0, j] = np.linalg.norm(projected - mu[j, :], 1)
        # +1: class labels start at 1 while Python indices start at 0.
        Ytest[row] = np.argmin(dists) + 1
    return Ytest
# function to compute the \rho value
def predict_L1_molecule(Xtest, W, mu):
    """
    Nearest-center l1 classification plus a confidence score.

    For every row of Xtest, the label is the 1-based index of the closest
    center of mu (l1 distance in the space projected by W). The confidence
    rho is (d1 - d0) / (d1 + d0) computed from the distances to the first
    two centers — only meaningful for k == 2.
    """
    n_class = mu.shape[0]
    n_samples = Xtest.shape[0]
    Ytest = np.zeros((n_samples, 1))
    confidence = np.zeros((n_samples, 1))
    for row in range(n_samples):
        projected = np.matmul(Xtest[row, :], W)
        dists = np.zeros((1, n_class))
        for j in range(n_class):
            dists[0, j] = np.linalg.norm(projected - mu[j, :], 1)
        Ytest[row] = np.argmin(dists) + 1  # labels start at 1
        confidence[row] = (dists[0, 1] - dists[0, 0]) / (dists[0, 1] + dists[0, 0])
    return Ytest, confidence
# =============================Plot functions=================================================
# function to plot the distribution of \rho
def rhoHist(rho, n_equal_bins):
    """
    Draw a histogram of the confidence scores rho.

    The n_equal_bins bins are equally spaced between min(rho) and max(rho);
    the figure is shown with plt.show().
    """
    lower, upper = rho.min(), rho.max()
    edges = np.linspace(start=lower, stop=upper, num=n_equal_bins + 1, endpoint=True)
    plt.hist(rho, bins=edges)
    plt.title("Histogram of confidence score")
    plt.show()
def pd_plot(X, Yr, W, flag=None):
    """
    Scatter plot of the projection X @ W for the two clusters labelled
    1 and 2 in Yr. When flag is True, the two cluster centers are drawn
    as star markers as well.
    """
    plt.figure()
    projected = np.dot(X, W)
    cluster1 = projected[np.where(Yr == 1)[0], :]
    cluster2 = projected[np.where(Yr == 2)[0], :]
    center1 = np.mean(cluster1, axis=0)
    center2 = np.mean(cluster2, axis=0)
    if flag == True:
        plt.scatter(center1[0], center1[1], c="y", s=100, marker="*", label="center1")
        plt.scatter(center2[0], center2[1], c="c", s=100, marker="*", label="center2")
    plt.plot(cluster1[:, 0], cluster1[:, 1], "ob", label="cluster1")
    plt.plot(cluster2[:, 0], cluster2[:, 1], "^r", label="cluster2")
    plt.title("Primal_Dual")
    plt.legend()
    plt.show()
def pca_plot(X, Yr, W, flag=None):
    """
    Scatter plot of the first two PCA components of X for the two clusters
    labelled 1 and 2 in Yr. When flag is True the two cluster centers are
    drawn as star markers. W is accepted for API symmetry with pd_plot but
    is not used here.
    """
    plt.figure()
    components = PCA(n_components=2).fit_transform(X)
    cluster1 = components[np.where(Yr == 1)[0], :]
    cluster2 = components[np.where(Yr == 2)[0], :]
    center1 = np.mean(cluster1, axis=0)
    center2 = np.mean(cluster2, axis=0)
    if flag == True:
        plt.scatter(center1[0], center1[1], c="y", s=100, marker="*", label="center1")
        plt.scatter(center2[0], center2[1], c="c", s=100, marker="*", label="center2")
    plt.plot(cluster1[:, 0], cluster1[:, 1], "ob", label="cluster1")
    plt.plot(cluster2[:, 0], cluster2[:, 1], "^r", label="cluster2")
    plt.title("PCA")
    plt.legend()
    plt.show()
def Predrejection(df_confidence, eps, num_eps):
    """
    Compute the false-prediction rate as a function of the rejection
    threshold epsilon.

    Samples whose confidence score rho lies inside (-epsilon, epsilon) are
    rejected (their labels are set to 0) before comparing predictions with
    ground truth. Both the error rate and the rejection rate are plotted
    against the tested thresholds.

    ----- INPUT
      df_confidence : DataFrame with columns "Yoriginal", "Ypred" and "rho"
      eps           : the largest threshold tested
      num_eps       : the number of thresholds tested in [0, eps)
    ----- OUTPUT
      falseRate     : array of FN+FP ratios, one entry per threshold
    """
    Yr = np.array(df_confidence["Yoriginal"])
    Yr[np.where(Yr == 2)] = -1  # recode class 2 as -1
    Ypre = np.array(df_confidence["Ypred"])
    Ypre[np.where(Ypre == 2)] = -1
    rho = df_confidence["rho"]
    epsList = np.arange(0, eps, eps / num_eps)
    falseRate = []
    rejectSample = []
    for epsilon in epsList:
        band = np.where((-epsilon < rho) & (rho < epsilon))
        # Rejected samples get label 0; the zeroing accumulates over the
        # loop, which is fine because the band only widens with epsilon.
        Yr[band] = 0
        Ypre[band] = 0
        mismatch = Yr - Ypre
        falseRate.append(len(np.where(mismatch != 0)[0]) / len(Yr))
        rejectSample.append(len(band[0]) / len(Yr))
    plt.figure()
    plt.plot(epsList, falseRate)
    plt.xlabel("Confidence score prediction")
    plt.ylabel("FN+FP (ratio)")
    # plot the number of rejected samples
    plt.figure()
    plt.plot(epsList, rejectSample)
    plt.xlabel("Confidence score prediction")
    plt.ylabel(" Reject samples (ratio) ")
    return np.array(falseRate)
# ==============================================================================
def predict_FISTA(Xtest, W, mu):
    """
    Nearest-center classification in the projected space using the
    Euclidean (l2) distance. Returned labels are 1-based, shaped (m, 1).
    """
    n_class = mu.shape[0]
    n_samples = Xtest.shape[0]
    Ytest = np.zeros((n_samples, 1))
    for row in range(n_samples):
        projected = np.matmul(Xtest[row, :], W)
        dists = np.zeros((1, n_class))
        for j in range(n_class):
            dists[0, j] = np.linalg.norm(projected - mu[j, :], 2)
        # +1: class labels start at 1 while Python indices start at 0.
        Ytest[row] = np.argmin(dists) + 1
    return Ytest
def normest(X, tol=1.0e-6, maxiter=100):
    """
    Estimate the 2-norm (largest singular value) of X by power iteration,
    mirroring Matlab's normest.

    Parameters
    ----------
    X : ndarray, np.matrix or scipy sparse matrix
    tol : float
        Relative tolerance on the change of the estimate between iterations.
    maxiter : int
        Maximum number of iterations; a RuntimeWarning is issued and the
        current estimate returned when exceeded.
    """
    # import necessary modules
    import scipy.sparse
    import numpy as np
    import warnings
    # Starting vector: the column sums of |X|, flattened to 1-D.
    if scipy.sparse.issparse(X):
        x = np.array(np.sum(np.abs(X), axis=0))
        x = np.reshape(x, max(x.shape))
    elif type(X) == np.matrix:
        x = np.sum(np.abs(np.asarray(X)), axis=0)
        x = np.reshape(x, max(x.shape))
    else:
        x = np.sum(np.abs(X), axis=0)
    estimate = np.linalg.norm(x)
    if estimate == 0:
        return estimate
    x = x / estimate
    previous = 0
    iteration = 0
    while np.abs(estimate - previous) > tol * estimate:
        previous = estimate
        Xx = np.matmul(X, x)
        if np.count_nonzero(Xx) == 0:
            # Degenerate direction: restart from a random vector.
            Xx = np.random.rand(Xx.shape[0])
        x = np.matmul(X.T, Xx)
        normx = np.linalg.norm(x)
        estimate = normx / np.linalg.norm(Xx)
        x = x / normx
        iteration += 1
        if iteration > maxiter:
            warnings.warn(
                "Normest::NotConverge:the number of iterations exceeds {} times.\nThe error is {}, the tolerance is {}".format(
                    maxiter, np.abs(estimate - previous), tol
                ),
                RuntimeWarning,
            )
            break
    return estimate
def merge_topGene_norm(topGenes, normW, clusternames):
    """
    Merge the two outputs of select_features_w into a single DataFrame.

    The result has one column group per entry of clusternames, each with
    two subcolumns: 'topGenes' (the gene names) and 'Weights' (their norm
    of weight), organized as a two-level MultiIndex.

    Raises ValueError when topGenes and normW differ in shape.
    """
    if topGenes.shape != normW.shape:
        raise ValueError("The dimension of the two input should be the same")
    m, n = topGenes.shape
    # Interleave genes and weights column-wise: g0, w0, g1, w1, ...
    interleaved = np.dstack((topGenes, normW)).reshape(m, 2 * n)
    columns = []
    for cluster in clusternames:
        columns.append((cluster, "topGenes"))
        columns.append((cluster, "Weights"))
    df_res = pd.DataFrame(interleaved, columns=columns)
    df_res.columns = pd.MultiIndex.from_tuples(
        df_res.columns, names=["CluserNames", "Attributes"]
    )
    return df_res
def merge_topGene_norm_acc(
    topGenes,
    normW,
    clusternames,
    acctest,
    nbr_features=30,
    saveres=False,
    file_tag=None,
    outputPath="../results/",
):
    """
    Variant of merge_topGene_norm where the 'Weights' column-group label is
    replaced by the mean test accuracy of each class.

    ----- INPUT
      topGenes (ndarray or DataFrame) : top genes chosen by select_features_w
      normW (ndarray or DataFrame)    : their norm of weight
      clusternames (list or array)    : class names
      acctest (DataFrame-like)        : test accuracies; row index 4 of its
                                        values is taken as the per-class mean
      nbr_features (int, optional)    : number of rows kept in the result
      saveres (bool, optional)        : True to write the result as CSV
      file_tag (str, optional)        : prefix of the saved file name
      outputPath (str, optional)      : output directory of the saved file
    ----- OUTPUT
      df_res : DataFrame with a (names, attributes) MultiIndex on columns
    """
    if type(topGenes) is pd.DataFrame:
        topGenes = topGenes.values
    if type(normW) is pd.DataFrame:
        normW = normW.values
    if topGenes.shape != normW.shape:
        raise ValueError("The dimension of the two input should be the same")
    m, n = topGenes.shape
    # Interleave genes and weights column-wise: g0, w0, g1, w1, ...
    interleaved = np.dstack((topGenes, normW)).reshape(m, 2 * n)
    # Row 4 of acctest holds the mean accuracy per class.
    acctest_mean = acctest.values.tolist()[4]
    columns = []
    for i, cluster in enumerate(clusternames):
        columns.append((cluster, "topGenes"))
        columns.append((str(acctest_mean[i]), "Weights"))
    df_res = pd.DataFrame(interleaved[0:nbr_features, :], columns=columns)
    df_res.columns = pd.MultiIndex.from_tuples(
        df_res.columns, names=["CluserNames", "Attributes"]
    )
    if saveres:
        df_res.to_csv(
            "{}{}_Heatmap of Acc_normW_Topgenes.csv".format(outputPath, file_tag),
            sep=";",
        )
    return df_res
def compare_2topGenes(
    topGenes1,
    topGenes2,
    normW1=None,
    normW2=None,
    lst_col=None,
    nbr_limit=30,
    printOut=False,
):
    """
    #=======================================================================================
    # Compare column by column the elements between to topGenes, it choose for
    # each column first "nbr" elements to check.
    # The two topGenes should be in same size of columns
    # ----- INPUT
    #  topGenes1, topGenes2 (DataFrame)   : Two topGenes to be compared
    #  normW1, normW2 (DataFrame,optional): Two matrix of weights correspondent. Default: None
    #  lst_col (list, optional)           : If given, only the chosen column will be compared. Default: None
    #  nbr_limit (scalar, optional)       : Number of the lines to be compared. Default: 30
    #  printOut (boolean, optional)       : If True, the comparison result will be shown on screen. Default: False
    # ----- OUTPUT
    #  out (string)                       : It returns a string of the comparing result as output.
    #=======================================================================================
    """
    import pandas as pd
    import numpy as np

    if type(topGenes1) != type(topGenes2):
        raise ValueError("The two topGenes to be compared should be of the same type.")
    if type(topGenes1) is not pd.DataFrame:
        # BUGFIX: the previous code iterated over the integer
        # topGenes1.shape[1] directly ("for i in topGenes1.shape[1]"),
        # which raises TypeError for every ndarray input; range() is needed.
        col = ["C" + str(i) for i in range(topGenes1.shape[1])]
        topGenes1 = pd.DataFrame(topGenes1, columns=col)
        topGenes2 = pd.DataFrame(topGenes2, columns=col)
    out = []
    out.append("Comparing the two TopGenes:\n")
    # After the benchmark, the appended list and then converted to whole string seems to be the least consuming
    list_name = list(topGenes1.columns)
    if lst_col is not None:
        list_name = [list_name[ind] for ind in lst_col]
    for name in list_name:
        out.append(
            "{0:{fill}{align}40}\n".format(" Class %s " % name, fill="=", align="^")
        )
        col_1 = np.array(topGenes1[[name]], dtype=str)
        col_2 = np.array(topGenes2[[name]], dtype=str)
        # Here np.nozero will return a tuple of 2 array corresponding the first
        # and the second dimension while the value of second dimension will
        # always be 0. So the first dimension's last location+1 will be the length
        # of nonzero arrays and that it's just the location of the first zero
        # element
        length_nonzero_1 = np.nonzero(col_1)[0][-1] + 1
        length_nonzero_2 = np.nonzero(col_2)[0][-1] + 1
        # np.nonzero will not detect '0.0' as zero type
        if all(col_1 == "0.0"):
            length_nonzero_1 = 0
        if all(col_2 == "0.0"):
            length_nonzero_2 = 0
        length_min = min(length_nonzero_1, length_nonzero_2)
        # Check if at least one of the classes contains only zero and avoid the error
        if length_min == 0 and length_nonzero_1 == length_nonzero_2:
            out.append(
                "* Warning: No feature is selected for both two class\n Skipped for this class"
            )
            continue
        elif length_min == 0 and length_nonzero_1 > 0:
            out.append(
                "* Warning: No feature is selected for this class in TopGenes2\n"
            )
            out.append(
                "* All {} elements are included only in topGenes1:\n".format(
                    min(length_nonzero_1, nbr_limit)
                )
            )
            for k in range(min(length_nonzero_1, nbr_limit)):
                if normW1 is None:
                    out.append(" (%s)\n" % (str(col_1[k, 0])))
                else:
                    out.append(
                        " (%s, %s)\n" % (str(col_1[k, 0]), normW1[[name]].iloc[k, 0])
                    )
            continue
        elif length_min == 0 and length_nonzero_2 > 0:
            out.append(
                "* Warning: No feature is selected for this class in TopGenes1\n"
            )
            out.append(
                "* All {} elements are included only in topGenes2:\n".format(
                    min(length_nonzero_2, nbr_limit)
                )
            )
            for k in range(min(length_nonzero_2, nbr_limit)):
                if normW2 is None:
                    out.append(" (%s)\n" % (str(col_2[k, 0])))
                else:
                    out.append(
                        " (%s, %s)\n" % (str(col_2[k, 0]), normW2[[name]].iloc[k, 0])
                    )
            continue
        if length_min < nbr_limit:
            length = length_min
            out.append(
                "* Warning: In this column, the 1st topGenes has {} nozero elements\n* while the 2nd one has {} nonzero elements\n".format(
                    length_nonzero_1, length_nonzero_2
                )
            )
            out.append("* So only first %d elements are compared\n\n" % length_min)
        else:
            length = nbr_limit
        set_1 = col_1[0:length]
        set_2 = col_2[0:length]
        set_common = np.intersect1d(set_1, set_2)  # Have in common
        set_o1 = np.setdiff1d(set_1, set_2)  # Exclusively in topGenes1
        set_o2 = np.setdiff1d(set_2, set_1)  # Exclusively in topGenes2
        lc = len(set_common)
        # print exclusively in topGenes1
        out.append(
            "Included exclusively in first topGenes: {} elements in total.\n".format(
                length - lc
            )
        )
        if length - lc > 0:
            if normW1 is None:
                out.append("Details:(Name)\n")
            else:
                out.append("Details:(Name,Weight)\n")
            idx_i, idx_j = np.where(topGenes1[[name]].isin(set_o1))
            for i, j in zip(idx_i, idx_j):
                if normW1 is None:
                    out.append(" (%s)\n" % str(set_1[i, j]))
                else:
                    out.append(
                        " (%s, %s)\n"
                        % (str(set_1[i, j]), str(normW1[[name]].iloc[i, j]))
                    )
        out.append("\nNumber of elements in common:{}\n".format(lc))
        # print exclusively in topGenes2
        out.append(
            "\nIncluded exclusively in second topGenes: {} elements in total.\n".format(
                length - lc
            )
        )
        if length - lc > 0:
            if normW2 is None:
                out.append("Details:(Name)\n")
            else:
                out.append("Details:(Name,Weight)\n")
            idx_i, idx_j = np.where(topGenes2[[name]].isin(set_o2))
            for i, j in zip(idx_i, idx_j):
                if normW2 is None:
                    out.append(" (%s)\n" % str(set_2[i, j]))
                else:
                    out.append(
                        " (%s, %s)\n"
                        % (str(set_2[i, j]), str(normW2[[name]].iloc[i, j]))
                    )
        out.append("{:-<40}\n".format(""))
    out = "".join(out)
    if printOut == True:
        print(out)
    return out
def heatmap_classification(
    Ytest,
    YR,
    clusternames,
    rotate=45,
    draw_fig=False,
    save_fig=False,
    func_tag=None,
    outputPath="../results/",
):
    """
    Build the row-normalized confusion matrix from the predicted labels
    Ytest and true labels YR (both 1-based) and, optionally, draw and/or
    save it as a seaborn heatmap.

    Entry (i-1, j-1) of the returned (k, k) matrix is the fraction of
    class-i samples that were predicted as class j.
    """
    k = len(np.unique(YR))  # number of classes inferred from the labels
    Heatmap_matrix = np.zeros((k, k))
    for true_lab in np.arange(k) + 1:
        class_size = np.where(YR == true_lab, 1, 0).sum()
        for pred_lab in np.arange(k) + 1:
            hits = np.where(Ytest[YR == true_lab] == pred_lab, 1, 0).sum()
            Heatmap_matrix[true_lab - 1, pred_lab - 1] = hits / class_size
    # Plotting
    if draw_fig == True:
        plt.figure(figsize=(10, 6))
        annot = False
        if k > 10:
            annot = False
        if clusternames is not None:
            axes = sns.heatmap(
                Heatmap_matrix,
                cmap="jet",
                annot=annot,
                fmt=".2f",
                xticklabels=clusternames,
                yticklabels=clusternames,
            )
        else:
            axes = sns.heatmap(Heatmap_matrix, cmap="jet", annot=annot, fmt=".2f")
        axes.set_xlabel("Predicted true positive", fontsize=14)
        axes.set_ylabel("Ground true", fontsize=14)
        axes.tick_params(labelsize=7)
        plt.xticks(rotation=rotate)
        axes.set_title("Heatmap of confusion Matrix", fontsize=14)
        plt.tight_layout()
    if save_fig == True:
        plt.savefig(
            "{}{}_Heatmap_of_confusion_Matrix.png".format(outputPath, func_tag)
        )
    return Heatmap_matrix
def heatmap_normW(
    normW,
    clusternames=None,
    nbr_l=10,
    rotate=45,
    draw_fig=False,
    save_fig=False,
    func_tag=None,
    outputPath="../results/",
):
    """
    Normalize |normW| column-wise by its first row, keep the first nbr_l
    rows, and optionally draw and/or save the result as a seaborn heatmap.

    Returns the (nbr_l, n_clusters) normalized matrix ANR.
    """
    magnitudes = np.abs(normW)
    scaled = magnitudes / magnitudes[0, :]
    # Clamp the number of displayed rows to what is available.
    if normW.shape[0] < nbr_l:
        nbr_l = normW.shape[0]
    ANR = scaled[0:nbr_l, :]
    annot = False
    if draw_fig == True:
        plt.figure(figsize=(10, 6))
        if clusternames is None:
            axes2 = sns.heatmap(
                ANR,
                cmap="jet",
                annot=annot,
                fmt=".3f",
                yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
            )
        else:
            axes2 = sns.heatmap(
                ANR,
                cmap="jet",
                annot=annot,
                fmt=".3f",
                xticklabels=clusternames,
                yticklabels=np.linspace(1, nbr_l, num=nbr_l, endpoint=True, dtype=int),
            )
        plt.xticks(rotation=rotate)
        axes2.set_ylabel("Features", fontsize=14)
        axes2.set_xlabel("Clusters", fontsize=14)
        axes2.tick_params(labelsize=7)
        axes2.set_title("Heatmap of Matrix W", fontsize=14)
        plt.tight_layout()
    if save_fig == True:
        plt.savefig("{}{}_Heatmap_of_signature.png".format(outputPath, func_tag))
    return ANR
def drop_cells_with_ID(X, Y, ID, n_fold):
    """
    # ====================================================================
    # Make the number of samples divisible by n_fold.
    # If X.shape[0] is not divisible by n_fold, n_diff = m % n_fold randomly
    # chosen rows of the biggest class (most frequent label in Y) are
    # removed from X, Y and ID.
    #
    # ---- Input
    #   X      : The data
    #   Y      : The label
    #   ID     : The sample identifiers, same length as Y
    #   n_fold : The number of fold
    # --- Output
    #   X_new, Y_new, ID_new : The new data, labels and identifiers
    # =====================================================================
    """
    m, d = X.shape
    if m % n_fold == 0:
        # Already divisible: return the inputs untouched.
        return X, Y, ID
    n_diff = m % n_fold
    # Find the biggest class (assumed to contain at least n_diff samples).
    lst_count = []
    for i in np.unique(Y):
        lst_count.append(np.where(Y == i, 1, 0).sum())
    ind_max = np.unique(Y)[np.argmax(lst_count)]
    lst_inds = np.where(Y == ind_max)[0]
    # BUGFIX: sample WITHOUT replacement. np.random.choice defaults to
    # replace=True, so duplicate indices could be drawn and np.delete would
    # then remove fewer than n_diff rows, leaving a size that is still not
    # divisible by n_fold.
    lst_del = np.random.choice(lst_inds, n_diff, replace=False)
    X_new = np.delete(X, lst_del, 0)
    Y_new = np.delete(Y, lst_del, 0)
    ID_new = np.delete(ID, lst_del, 0)
    return X_new, Y_new, ID_new
def drop_cells(X, Y, n_fold):
    """
    # ====================================================================
    # Make the number of samples divisible by n_fold.
    # If X.shape[0] is not divisible by n_fold, n_diff = m % n_fold randomly
    # chosen rows of the biggest class (most frequent label in Y) are
    # removed from X and Y.
    #
    # ---- Input
    #   X      : The data
    #   Y      : The label
    #   n_fold : The number of fold
    # --- Output
    #   X_new, Y_new : The new data and the new label
    # =====================================================================
    """
    m, d = X.shape
    if m % n_fold == 0:
        # Already divisible: return the inputs untouched.
        return X, Y
    n_diff = m % n_fold
    # Find the biggest class (assumed to contain at least n_diff samples).
    lst_count = []
    for i in np.unique(Y):
        lst_count.append(np.where(Y == i, 1, 0).sum())
    ind_max = np.unique(Y)[np.argmax(lst_count)]
    lst_inds = np.where(Y == ind_max)[0]
    # BUGFIX: sample WITHOUT replacement. np.random.choice defaults to
    # replace=True, so duplicate indices could be drawn and np.delete would
    # then remove fewer than n_diff rows, leaving a size that is still not
    # divisible by n_fold.
    lst_del = np.random.choice(lst_inds, n_diff, replace=False)
    X_new = np.delete(X, lst_del, 0)
    Y_new = np.delete(Y, lst_del, 0)
    return X_new, Y_new
# ===================== Algorithms =======================================
def FISTA_Primal(X, YR, k, param):
    """
    # ====================================================================
    # FISTA (accelerated proximal gradient) solver for the primal problem
    # with an l1-ball constraint on the flattened weight matrix.
    # ---- Input
    #   X     : The data
    #   YR    : The label. Note that this should be an 2D array.
    #   k     : The number of class
    #   param : dict with required keys:
    #       niter : The number of iterations
    #       gamma : The gradient step size
    #       eta   : The radius for the projection on the l1 ball
    #   * isEpsilon is not used in the original file in Matlab
    # --- Output
    #   w           : The projection matrix
    #   mu          : The centers
    #   nbGenes_fin : The number of genes of the final step
    #   loss        : The loss for each iteration (normalized by loss[0])
    # ====================================================================
    """
    # === Check the validness of param and the initialization of the params ===
    if type(param) is not dict:
        raise TypeError("Wrong type of input argument param", type(param))
    lst_params = ["niter", "eta", "gamma"]  # necessary params
    if any(x not in param.keys() for x in lst_params):
        raise ValueError(
            "Missing parameter in param.\n Need {}.\n Got {} ".format(
                lst_params, list(param.keys())
            )
        )
    niter = param["niter"]
    eta = param["eta"]
    gamma = param["gamma"]
    n, d = X.shape
    # === With class2indicator():
    # Y = class2indicator(YR,k)
    # === With Onehotencoder:
    Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
    loss = np.zeros(niter)
    # Precompute the Gram matrices used by the gradient of ||Y - Xw||^2.
    XtX = np.matmul(X.T, X)
    XtY = np.matmul(X.T, Y)
    w_old = np.ones((d, k))
    w_loc = w_old
    t_old = 1  # FISTA momentum coefficient
    for i in range(niter):
        grad_w = np.matmul(XtX, w_loc) - XtY
        # gradient step
        V = w_loc - gamma * grad_w
        V = np.reshape(V, d * k)
        # Projection on the l1 ball
        V = proj_l1ball(V, eta)
        # Reshape back
        w_new = np.reshape(V, (d, k))
        # Chambolle method: momentum extrapolation of the iterate
        t_new = (i + 6) / 4  # or i+6 since pyhton starts from 0 ?
        w_loc_new = w_new + ((t_old - 1) / t_new) * (w_new - w_old)
        w_old = w_new
        w_loc = w_loc_new
        t_old = t_new
        loss[i] = np.linalg.norm(Y - np.matmul(X, w_loc), "fro") ** 2
    # end iteratons
    w = w_loc
    mu = centroids(np.matmul(X, w), YR, k)
    nbGenes_fin = nb_Genes(w)[0]
    loss = loss / loss[0]
    return w, mu, nbGenes_fin, loss
def primal_dual_L1N(X, YR, k, param):
    """
    # ====================================================================
    # Chambolle-Pock style primal-dual iteration with an l1-ball constraint
    # on the flattened weight matrix.
    # ---- Input
    #   X     : The data
    #   YR    : The label. Note that this should be an 2D array.
    #   k     : The number of class
    #   param : A type dict paramter which must have keys:
    #           'niter', 'eta', 'tau', 'rho','sigma', 'beta', 'tau2' and 'delta'
    #           Normally speaking:
    #           (The default value for beta is 0.25.)
    #           (IF not given, the value of the 'tau2' will be calculated by
    #           tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
    #           the 2-norm of the OneHotEncode of the YR given.)
    #           (Default value of the 'delta' is 1.0)
    # --- Output
    #   w           : The projection matrix of size (d,k)
    #   mu          : The centers of classes
    #   nbGenes_fin : The number of genes of the final result
    #   loss        : The loss for each iteration (normalized by loss[0])
    #   Z           : The dual matrix of size (m,k)
    # =====================================================================
    """
    m, d = X.shape
    Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
    # normY = np.linalg.norm(Y,2)
    # === Check the validness of param and the initialization of the params ===
    if type(param) is not dict:
        raise TypeError("Wrong type of input argument param", type(param))
    lst_params = [
        "niter",
        "eta",
        "tau",
        "rho",
        "sigma",
        "delta",
        "tau2",
        "beta",
    ]  # necessary params
    if any(x not in param.keys() for x in lst_params):
        raise ValueError(
            "Missing parameter in param.\n Need {}.\n Got {} ".format(
                lst_params, list(param.keys())
            )
        )
    niter = param["niter"]
    eta = param["eta"]
    tau = param["tau"]
    rho = param["rho"]
    sigma = param["sigma"]
    delta = param["delta"]
    tau2 = param["tau2"]
    # beta = param['beta']
    # === END check block ===
    # Initialization
    w_old = np.ones((d, k))
    Z_old = np.ones((m, k))
    mu_old = np.eye(k, k)
    Ik = np.eye(k, k)
    loss = np.zeros(niter)
    # Main Block
    for i in range(niter):
        # Primal step on w: gradient-like update followed by projection.
        V = w_old + tau * np.matmul(X.T, Z_old)
        # Reshape
        V = np.reshape(V, d * k)
        V = proj_l1ball(V, eta)
        # Hard-threshold tiny entries to exactly zero (sparsification).
        V[np.where(np.abs(V) < 0.001)] = 0
        # Reshape back
        w_new = np.reshape(V, (d, k))
        # no gamma here
        # w_new = w_new + gamma*(w_new - w_old) =>
        w = 2 * w_new - w_old  # over-relaxation (extrapolated iterate)
        mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
            1 + tau2 * rho
        )
        # mu = mu_new + gamma*(mu_new - mu_old) =>
        mu = 2 * mu_new - mu_old
        # Dual step on Z, clipped to the l-infinity unit ball.
        Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
        Z_new = np.maximum(np.minimum(Z, 1), -1)
        mu_old = mu_new
        w_old = w_new
        Z_old = Z_new
        loss[i] = np.linalg.norm(
            np.matmul(Y, mu_new) - np.matmul(X, w_new), 1
        ) + 0.5 * (np.linalg.norm(Ik - mu_new, "fro") ** 2)
    # End loop
    Z = Z_old
    w = w_new
    mu = mu_new
    nbGenes_fin = nb_Genes(w)[0]
    loss = loss / loss[0]
    return w, mu, nbGenes_fin, loss, Z
def primal_dual_Nuclear(X, YR, k, param):
    """
    # ====================================================================
    # Primal-dual iteration with a nuclear-norm-ball constraint on w,
    # enforced by projecting the singular values onto the l1 ball.
    # ---- Input
    #   X     : The data
    #   YR    : The label. Note that this should be an 2D array.
    #   k     : The number of class
    #   param : A type dict paramter which must have keys:
    #           'niter', 'eta_star', 'tau', 'rho','sigma', 'tau2','delta'
    #           and 'gamma'
    #           Normally speaking:
    #           (The default value for beta is 0.25.)
    #           (IF not given, the value of the 'tau2' will be calculated by
    #           tau2 = 0.25*(1/(np.sqrt(m)*normY)). Note that this normY is
    #           the 2-norm of the OneHotEncode of the YR given.)
    #           (Default value of the 'delta' is 1.0)
    # --- Output
    #   w           : The projection matrix of size (d,k)
    #   mu          : The centers of classes
    #   nbGenes_fin : The number of genes of the final result
    #   loss        : The loss for each iteration (normalized by loss[0])
    #   Z           : The dual matrix of size (m,k)
    # =====================================================================
    """
    m, d = X.shape
    Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
    # === Check the validness of param and the initialization of the params ===
    if type(param) is not dict:
        raise TypeError("Wrong type of input argument param", type(param))
    lst_params = [
        "niter",
        "eta_star",
        "tau",
        "rho",
        "sigma",
        "tau2",
        "beta",
    ]  # necessary params
    if any(x not in param.keys() for x in lst_params):
        raise ValueError(
            "Missing parameter in param.\n Need {}.\n Got {} ".format(
                lst_params, list(param.keys())
            )
        )
    niter = param["niter"]
    eta_star = param["eta_star"]
    delta = param["delta"]
    tau = param["tau"]
    rho = param["rho"]
    sigma = param["sigma"]
    tau2 = param["tau2"]
    # === END check block ===
    # Initialization
    w_old = np.ones((d, k))
    Z_old = np.ones((m, k))
    mu_old = np.eye(k, k)
    Ik = np.eye(k, k)
    loss = np.zeros(niter)
    # Main Block
    for i in range(niter):
        V = w_old + tau * np.matmul(X.T, Z_old)
        # Nuclear constraint: project the singular values of V onto the
        # l1 ball of radius eta_star, then rebuild the matrix.
        L, S0, R = np.linalg.svd(V, full_matrices=False)
        norm_nuclear = S0.sum()
        vs1 = proj_l1ball(S0.reshape((-1,)), eta_star)
        S1 = vs1.reshape(S0.shape)
        w = np.matmul(L, S1[..., None] * R)
        w = 2 * w - w_old  # over-relaxation (extrapolated iterate)
        mu_new = (mu_old + rho * tau2 * Ik - tau2 * np.matmul(Y.T, Z_old)) / (
            1 + tau2 * rho
        )
        mu = 2 * mu_new - mu_old
        # Dual step on Z, clipped to the l-infinity unit ball.
        Z = (Z_old + sigma * (np.matmul(Y, mu) - np.matmul(X, w))) / (1 + sigma * delta)
        Z_new = np.maximum(np.minimum(Z, 1), -1)
        mu_old = mu_new
        w_old = w
        Z_old = Z_new
        loss[i] = np.linalg.norm(np.matmul(Y, mu_new) - np.matmul(X, w), 1) + 0.5 * (
            np.linalg.norm(Ik - mu_new, "fro") ** 2
        )
    # End loop
    Z = Z_old
    mu = mu_new
    nbGenes_fin, _ = nb_Genes(w)
    loss = loss / loss[0]
    return w, mu, nbGenes_fin, loss, Z
# ================================== Part 2 ====================================
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta(
    func_algo,
    func_predict,
    X,
    YR,
    k,
    genenames=None,
    clusternames=None,
    niter=30,
    rho=1,
    tau=4,
    beta=0.25,
    delta=1.0,
    eta=None,
    eta_star=None,
    gamma=1,
    nfold=4,
    rng=1,
    showres=True,
    keepfig=False,
    saveres=False,
    outputPath="../results/",
):
    """
    # =====================================================================
    # Basic function to launch the algorithm of some specific parameters.
    # - Input:
    #   - func_algo (necessary)      : The function of the algorithm
    #   - func_predict (necessary)   : The function to predict
    #   - X (necessary)              : The data
    #   - YR (necessary)             : The labels for the data
    #   - k (necessary)              : The number of the clusters
    #
    #   - genenames (optional)       : The names of the features of the data
    #                                  if not given, it will be
    #                                   ['Gene 1','Gene 2',...]
    #
    #   - clusternames (optional)    : The clusternames of the data
    #                                  if not given, it will be
    #                                   ['Class 1', 'Class 2',...]
    #
    #   - niter (optional)           : The number of iterations
    #
    #   - rho, tau, beta, delta,     : The hyper-parameters for the algo
    #     eta, gamma, etc (optional)
    #
    #   - nfold (optional)           : The number of the folds of the cross validation
    #
    #   - rng (optional)             : The seed to control the random funcion
    #
    #   - showres (optional)         : Boolean value. True if we want to show
    #                                   the results, plot the figures etc.
    #
    #   - saveres (optional)         : Boolean value. True to save the results
    #
    #   - outputPath (optional)      : String value. The output path.
    #
    # - Output:
    #   - mu                         : The centroids
    #   - nbm                        : Number of genes
    #   - accG                       : Global accuracy
    #   - loss                       : Loss for each iterations
    #   - W_mean                     : Mean weight matrix for all folds
    #   - timeElapsed                : Time elapsed for one fold
    #   - (And the tables)           : df_topGenes, df_normW, df_topG_normW,
    #                                  df_topGenes_mean, df_normW_mean,
    #                                  df_topG_normW_mean, df_acctest
    # ======================================================================
    """
    np.random.seed(rng)  # reproducible
    if not os.path.exists(outputPath):  # make the directory if it does not exist
        os.makedirs(outputPath)
    n, d = X.shape
    # parameter checking
    if genenames is None:
        genenames = ["Gene {}".format(i + 1) for i in range(d)]
    if clusternames is None:
        clusternames = ["Class {}".format(i + 1) for i in range(k)]
    # Normalize the mean of datas (Deprecated)
    # m = np.mean(X,axis=0)
    # X = X-m
    # normX = normest(X)
    # X = X/normX
    # YR = np.array(YR).reshape(-1,1)
    if YR.ndim == 1:  # In case that OneHotEncoder get 1D array and raise a TypeError
        YR = YR.reshape(-1, 1)
    Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
    normY = normest(Y)
    normY2 = normY ** 2
    # Dropping the cells randomly if the n%d is not zero
    # For more details please see instructions in drop_cells
    X, YR = drop_cells(X, YR, nfold)
    param = {}
    param["niter"] = niter
    param["rho"] = rho
    param["tau"] = tau
    # NOTE(review): tau2/eps/sigma look like step sizes tied to a primal-dual
    # convergence condition — confirm against the algorithm's reference.
    tau2 = beta * (1 / (np.sqrt(n) * normY))
    param["tau2"] = tau2
    eps = 1 / (1 + tau2 * rho * 0.25)
    sigma = 1.0 / (tau + (tau2 * eps * normY2))  # Converge until 2.6 for L1Nel
    param["sigma"] = sigma
    param["delta"] = delta
    param["beta"] = beta
    param["eta"] = eta
    param["eta_star"] = eta_star
    param["gamma"] = gamma
    # Initialization
    nbG = np.zeros(nfold, dtype=int)  # Number of genes for each fold
    accuracy_train = np.zeros((nfold, k + 1))
    accuracy_test = np.zeros((nfold, k + 1))
    W0 = np.zeros((d, k, nfold))  # w in each fold
    mu0 = np.zeros((k, k, nfold))
    W_mean = np.zeros((d, k))
    # Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
    # Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
    loss_iter0 = np.zeros((nfold, niter))  # loss for each iteration of each fold
    # W_mean stores w for each eta, where w is the mean of W0 along its third axis
    # NOTE(review): this float re-init makes the dtype=int nbG above dead code.
    nbG = np.zeros(nfold)
    # Parameters printing
    print("\nStarts trainning for")
    print("{:>6}:{:<6}".format("niter", niter))
    if "fista" in func_algo.__name__.lower():
        print("{:>6}:{:<6}".format("eta", eta))
        print("{:>6}:{:<6}".format("gamma", delta))
    elif "or" in func_algo.__name__.lower():
        print("{:>6}:{:<6}".format("eta", eta))
        print("{:>6}:{:<6}".format("rho", rho))
        print("{:>6}:{:<6}".format("tau", tau))
        print("{:>6}:{:<6}".format("beta", beta))
        print("{:>6}:{:<6}".format("tau_mu", tau2))
        print("{:>6}:{:<6}".format("sigma", sigma))
        print("{:>6}:{:<6}".format("delta", delta))
        print("{:>6}:{:<6}".format("gamma", delta))
    elif "_l2" in func_algo.__name__.lower():
        print("{:>6}:{:<6}".format("eta", eta))
        print("{:>6}:{:<6}".format("rho", rho))
        print("{:>6}:{:<6}".format("tau", tau))
        print("{:>6}:{:<6}".format("beta", beta))
        print("{:>6}:{:<6}".format("tau_mu", tau2))
        print("{:>6}:{:<6}".format("sigma", sigma))
    elif "nuclear" in func_algo.__name__.lower():
        print("{:>6}:{:<6}".format("eta_star", eta_star))
        print("{:>6}:{:<6}".format("rho", rho))
        print("{:>6}:{:<6}".format("tau", tau))
        print("{:>6}:{:<6}".format("beta", beta))
        print("{:>6}:{:<6}".format("tau_mu", tau2))
        print("{:>6}:{:<6}".format("sigma", sigma))
        print("{:>6}:{:<6}".format("delta", delta))
    else:
        print("{:>6}:{:<6}".format("eta", eta))
        print("{:>6}:{:<6}".format("rho", rho))
        print("{:>6}:{:<6}".format("tau", tau))
        print("{:>6}:{:<6}".format("beta", beta))
        print("{:>6}:{:<6}".format("tau_mu", tau2))
        print("{:>6}:{:<6}".format("sigma", sigma))
        print("{:>6}:{:<6}".format("delta", delta))
    Y_PDS = np.zeros(YR.shape)
    meanclassi = np.zeros(nfold)
    kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
    # Fit once on the whole dataset; w_all is returned for downstream use.
    w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
    # K-fold cross-validation: train on each fold, record weights, accuracy, loss.
    for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
        print("{:-<30}".format(""))
        print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
        print("-> {} classification...".format(func_algo.__name__))
        # ========== Training =========
        Xtrain = X[train_ind]
        Xtest = X[test_ind]
        Ytrain = YR[train_ind]
        Ytest = YR[test_ind]
        startTime = time.perf_counter()
        w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
        endTime = time.perf_counter()
        timeElapsed = endTime - startTime
        print("-> Completed.\n-> Time Elapsed:{:.4}s".format(timeElapsed))
        W0[:, :, i] = w
        mu0[:, :, i] = mu
        # Z0[:,:,i] = Z
        loss_iter0[i, :] = loss
        # ========== Accuracy =========
        Ytrain_pred = func_predict(Xtrain, w, mu)
        Ytest_pred = func_predict(Xtest, w, mu)
        accuracy_train[i, 0], accuracy_train[i, 1 : k + 1] = compute_accuracy(
            Ytrain, Ytrain_pred, k
        )
        accuracy_test[i, 0], accuracy_test[i, 1 : k + 1] = compute_accuracy(
            Ytest, Ytest_pred, k
        )
        meanclassi[i] = np.mean(accuracy_test[i, 1 : k + 1])
        nbG[i] = nbGenes
        Y_PDS[test_ind] = Ytest_pred
        print("{:-<30}".format(""))
    # end kfold loop
    nbm = int(nbG.mean())
    accG = np.mean(accuracy_test[:, 0], axis=0)
    # NOTE(review): Meanclass and normfro below are computed but never used or returned.
    Meanclass = meanclassi.mean()
    W_mean = np.mean(W0, axis=2)
    mu_mean = np.mean(mu0, axis=2)
    # Z_mean= np.mean(Z0,axis=2)
    normfro = np.linalg.norm(w, "fro")
    print("Training step ends.\n")
    # Class size
    Ctab = []
    size_class = np.zeros(k)  # Size of each class (real)
    size_class_est = np.zeros(k)  # Size of each class (estimated)
    for j in range(k):
        size_class[j] = (YR == (j + 1)).sum()
        size_class_est[j] = (Y_PDS == (j + 1)).sum()
        Ctab.append("Class {}".format(j + 1))
    df_szclass = pd.DataFrame(size_class, index=Ctab, columns=["Class Size"])
    df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=["Class Size"])
    # Data accuracy: append the across-fold mean as a final "Mean" row
    accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
    accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
    ind_df = []
    for i_fold in range(nfold):
        ind_df.append("Fold {}".format(i_fold + 1))
    ind_df.append("Mean")
    columns = ["Global"]
    if clusternames is None:
        columns += Ctab
    else:
        columns += clusternames
    df_accTrain = pd.DataFrame(accuracy_train, index=ind_df, columns=columns)
    df_acctest = pd.DataFrame(accuracy_test, index=ind_df, columns=columns)
    # Feature selection
    print("Selecting features from whole dataset...", end="")
    w, mu, nbGenes, loss = func_algo(X, YR, k, param)[0:4]
    topGenes, normW = select_feature_w(w, genenames)
    topGenes_mean, normW_mean = select_feature_w(W_mean, genenames)
    # Mean of each fold
    df_topGenes_mean = pd.DataFrame(topGenes_mean, columns=clusternames)
    df_normW_mean = pd.DataFrame(normW_mean, columns=clusternames)
    df_topG_normW_mean = merge_topGene_norm(topGenes_mean, normW_mean, clusternames)
    # All data
    df_topGenes = pd.DataFrame(topGenes, columns=clusternames)
    df_normW = pd.DataFrame(normW, columns=clusternames)
    df_topG_normW = merge_topGene_norm(topGenes, normW, clusternames)
    print("Completed.\n")
    # Two heatmaps
    M_heatmap_classification = heatmap_classification(
        Y_PDS, YR, clusternames, rotate=60
    )
    M_heatmap_signature = heatmap_normW(normW, clusternames, nbr_l=30, rotate=60)
    # Results
    if showres == True:
        print("Size class (real):")
        print(df_szclass)
        print("\nSize class (estimated):")
        print(df_szclass_est)
        print("\nAccuracy Train")
        print(df_accTrain)
        print("\nAccuracy Test")
        print(df_acctest)
        if keepfig == False:
            plt.close("all")
        fig_lossIter = plt.figure(figsize=(8, 6))
        plt.plot(np.arange(niter, dtype=int) + 1, loss)
        msg_eta = "$\eta$:%d" % eta if eta is not None else ""
        msg_etaS = "$\eta*$:%d" % eta_star if eta_star is not None else ""
        plt.title(
            "loss for each iteration {} {}\n ({})".format(
                msg_eta, msg_etaS, func_algo.__name__
            ),
            fontsize=18,
        )
        plt.ylabel("Loss", fontsize=18)
        plt.xlabel("Iteration", fontsize=18)
        plt.xticks(np.linspace(1, niter, num=6, endpoint=True, dtype=int))
        plt.xlim(left=1, right=niter)
        plt.ylim((0, 1))
    # Saving Result
    if saveres == True:
        # define two nametags
        nametag_eta = "_eta-%d" % eta if eta is not None else ""
        nametag_etaS = "_etaStar-%d" % eta_star if eta_star is not None else ""
        # save loss
        filename_loss = "loss_{}_beta-{}_delta-{}{}{}_niter-{}.txt".format(
            func_algo.__name__, beta, delta, nametag_eta, nametag_etaS, niter
        )
        np.savetxt(outputPath + filename_loss, loss)
        # define function name tag for two heatmaps
        func_tag = func_algo.__name__ + nametag_eta + nametag_etaS
        # Save heatmaps
        filename_heat = "{}{}_Heatmap_of_confusion_Matrix.npy".format(
            outputPath, func_tag
        )
        np.save(filename_heat, M_heatmap_classification)
        filename_heat = "{}{}_Heatmap_of_signature_Matrix.npy".format(
            outputPath, func_tag
        )
        np.save(filename_heat, M_heatmap_signature)
        df_acctest.to_csv(
            "{}{}{}{}_AccuracyTest.csv".format(
                outputPath, func_algo.__name__, nametag_eta, nametag_etaS
            ),
            sep=";",
        )
        df_topG_normW.to_csv(
            "{}{}{}{}_TopGenesAndNormW.csv".format(
                outputPath, func_algo.__name__, nametag_eta, nametag_etaS
            ),
            sep=";",
        )
    # Other possiblilities to save
    # fig_lossIter.savefig('{}{}{}{}_niter-{}_loss_iters.png'.format(outputPath,func_algo.__name__,nametag_eta,nametag_etaS,niter))
    # All data
    # df_topGenes.to_csv('{}{}_TopGenes.csv'.format(outputPath,func_algo.__name__),sep=';')
    # df_normW.to_csv('{}{}_NormW.csv'.format(outputPath,func_algo.__name__),sep=';')
    # Mean of each fold
    # df_topGenes_mean.to_csv('{}{}_TopGenes_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
    # df_normW_mean.to_csv('{}{}_NormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
    # df_topG_normW_mean.to_csv('{}{}_TopGenesAndNormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
    return (
        mu_mean,
        nbm,
        accG,
        loss,
        W_mean,
        timeElapsed,
        df_topGenes,
        df_normW,
        df_topG_normW,
        df_topGenes_mean,
        df_normW_mean,
        df_topG_normW_mean,
        df_acctest,
        w_all,
    )
# ===================== ========================================================
def getPredLabel(Ypred):
    """Binarize continuous predictions into labels 1/2 (threshold 1.5), in place.

    Values strictly above 1.5 become 2; everything else becomes 1.
    Mutates and returns the same array.
    """
    above = Ypred > 1.5
    Ypred[above] = 2
    Ypred[~above] = 1
    return Ypred
# =====================Functions used to compare different algorithms========================================================
def getCoefs(alg, model):
    """Extract the fitted weight/importance array from *model* for algorithm *alg*.

    'RF' -> feature_importances_, 'svm' -> coef_ transposed, 'plsda' -> coef_.
    An unrecognized *alg* leaves ``coef`` unbound (UnboundLocalError), as before.
    """
    if alg == "plsda":
        coef = model.coef_
    elif alg == "svm":
        coef = model.coef_.transpose()
    elif alg == "RF":
        coef = model.feature_importances_
    return coef
# =====================Functions used to compute the ranked features and their weights=======================
def TopGenbinary(w, feature_names):
    """Rank features of a two-class weight matrix by |w[:,0] - w[:,1]|, descending.

    Returns a DataFrame with columns 'pd' (feature name) and 'weights'
    (signed class-0-minus-class-1 difference), sorted by absolute weight.
    """
    diffs = np.zeros(len(w))
    for idx, row in enumerate(w):
        diffs[idx] = row[0] - row[1]
    ranked = pd.DataFrame(feature_names, columns=["pd"])
    ranked["weights"] = diffs
    # Sort on |weight| via a temporary helper column, then discard it.
    ranked["sort_helper"] = ranked["weights"].abs()
    result = ranked.sort_values(by="sort_helper", ascending=False).drop(
        "sort_helper", axis=1
    )
    return result
def rankFeatureHelper(alg, coef, feature_names):
    """Build a two-column DataFrame (*alg* -> feature name, 'weights' -> coef)
    sorted by absolute weight, descending."""
    ranked = pd.DataFrame(feature_names, columns=[alg])
    ranked["weights"] = coef
    # Temporary |weight| column drives the sort, then is dropped.
    ranked["sort_helper"] = ranked["weights"].abs()
    return ranked.sort_values(by="sort_helper", ascending=False).drop(
        columns="sort_helper"
    )
def rankFeatures(X, Yr, algList, feature_names):
    """Fit each requested classifier on (X, Yr) and collect its ranked features.

    Supported entries of *algList*: 'svm' (linear SVC), 'RF' (random forest),
    'plsda' (PLS regression). Unrecognized names are silently skipped.
    Returns a list of DataFrames, one per fitted algorithm, each produced by
    rankFeatureHelper (features sorted by absolute weight).
    """
    featureList = []
    for alg in algList:
        if alg == "svm":
            model = SVC(probability=True, kernel="linear").fit(X, Yr.ravel())
            weights = model.coef_.transpose()
        elif alg == "RF":
            model = RandomForestClassifier(
                n_estimators=400, random_state=10, max_depth=3
            ).fit(X, Yr.ravel())
            weights = model.feature_importances_
        elif alg == "plsda":
            model = PLSRegression(n_components=4, scale=False).fit(X, Yr.ravel())
            weights = model.coef_
        else:
            continue
        featureList.append(rankFeatureHelper(alg, weights, feature_names))
    return featureList
# ===============================Compute the \rho==============================
def basic_run_eta_molecule(
    X,
    YR,
    ID,
    k,
    genenames=None,
    clusternames=None,
    niter=30,
    rho=1,
    tau=4,
    beta=0.25,
    delta=1.0,
    eta=500,
    gamma=1,
    nfold=4,
    random_seed=1,
):
    """
    # =====================================================================
    # This function is used to compute the df_confidence
    # Basic function to launch the algorithm of some specific parameters.
    # - Input:
    #   The function of the algorithm: primal_dual_L1N
    #   The function to predict: predict_L1_molecule
    #   - X (necessary)              : The data
    #   - YR (necessary)             : The labels for the data
    #   - k (necessary)              : The number of the clusters
    #
    #   - genenames (optional)       : The names of the features of the data
    #                                  if not given, it will be
    #                                   ['Gene 1','Gene 2',...]
    #
    #   - clusternames (optional)    : The clusternames of the data
    #                                  if not given, it will be
    #                                   ['Class 1', 'Class 2',...]
    #
    #   - niter (optional)           : The number of iterations
    #
    #   - rho, tau, beta, delta,     : The hyper-parameters for the algo
    #     eta, gamma (optional)
    #
    #   - nfold (optional)           : The number of the folds of the cross validation
    #
    #   - rng (optional)             : The seed to control the random funcion
    #
    # - Output:
    #   - Yprediction                : list of Predicted labels
    # ======================================================================
    """
    np.random.seed(random_seed)  # reproducible
    n, d = X.shape
    # parameter checking
    if genenames is None:
        genenames = ["Gene {}".format(i + 1) for i in range(d)]
    if clusternames is None:
        clusternames = ["Class {}".format(i + 1) for i in range(k)]
    if YR.ndim == 1:  # In case that OneHotEncoder get 1D array and raise a TypeError
        YR = YR.reshape(-1, 1)
    Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
    normY = normest(Y)
    normY2 = normY ** 2
    # Dropping the cells randomly if the n%d is not zero
    # See more details in drop_cells
    X, YR, Ident = drop_cells_with_ID(X, YR, ID, nfold)
    # Map row index -> sample identifier; used to report per-fold test IDs below.
    dico = dict(list(enumerate(Ident)))
    ref = pd.DataFrame.from_dict(dico, orient="index")
    param = {}
    param["niter"] = niter
    param["rho"] = rho
    param["tau"] = tau
    tau2 = beta * (1 / (np.sqrt(n) * normY))
    param["tau2"] = tau2
    eps = 1 / (1 + tau2 * rho * 0.25)
    sigma = 1.0 / (tau + (tau2 * eps * normY2))  # Converge until 2.6 for L1Nel
    param["sigma"] = sigma
    param["delta"] = delta
    param["beta"] = beta
    param["eta"] = eta
    param["gamma"] = gamma
    # Initialization
    nbG = np.zeros(nfold, dtype=int)  # Number of genes for each fold
    W0 = np.zeros((d, k, nfold))  # w in each fold
    mu0 = np.zeros((k, k, nfold))
    # Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
    # Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
    loss_iter0 = np.zeros((nfold, niter))  # loss for each iteration of each fold
    # W_mean stores w for each eta, where w is the mean of W0 along its third axis
    # NOTE(review): this float re-init makes the dtype=int nbG above dead code.
    nbG = np.zeros(nfold)
    # Parameters printing
    print("\nStarts trainning for")
    print("{:>6}:{:<6}".format("niter", niter))
    print("{:>6}:{:<6}".format("eta", eta))
    if "fista" in primal_dual_L1N.__name__.lower():
        print("{:>6}:{:<6}".format("gamma", delta))
    elif "or" in primal_dual_L1N.__name__.lower():
        print("{:>6}:{:<6}".format("rho", rho))
        print("{:>6}:{:<6}".format("tau", tau))
        print("{:>6}:{:<6}".format("beta", beta))
        print("{:>6}:{:<6}".format("tau_mu", tau2))
        print("{:>6}:{:<6}".format("sigma", sigma))
        print("{:>6}:{:<6}".format("delta", delta))
        print("{:>6}:{:<6}".format("gamma", delta))
    elif "_l2" in primal_dual_L1N.__name__.lower():
        print("{:>6}:{:<6}".format("rho", rho))
        print("{:>6}:{:<6}".format("tau", tau))
        print("{:>6}:{:<6}".format("beta", beta))
        print("{:>6}:{:<6}".format("tau_mu", tau2))
        print("{:>6}:{:<6}".format("sigma", sigma))
    else:
        print("{:>6}:{:<6}".format("rho", rho))
        print("{:>6}:{:<6}".format("tau", tau))
        print("{:>6}:{:<6}".format("beta", beta))
        print("{:>6}:{:<6}".format("tau_mu", tau2))
        print("{:>6}:{:<6}".format("sigma", sigma))
        print("{:>6}:{:<6}".format("delta", delta))
    Yprediction = []
    Confidence = []
    # accuracy_train = np.zeros((nfold,k+1))
    # accuracy_test = np.zeros((nfold,k+1))
    ID = []
    Ident = []
    kf = KFold(n_splits=nfold, random_state=random_seed, shuffle=True)
    w_all, mu_all, nbGenes_all, loss_all = primal_dual_L1N(X, YR, k, param)[0:4]
    # K-fold cross-validation; per-fold predictions/confidences are collected,
    # not accuracies (labels for the test folds are returned for the caller).
    for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
        print("{:-<30}".format(""))
        print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
        print("-> {} classification...".format(primal_dual_L1N.__name__))
        # ========== Training =========
        # NOTE(review): self-assignment below has no effect; candidate for removal.
        dico = dico
        Xtrain = X[train_ind]
        Ytrain = YR[train_ind]
        Xtest = X[test_ind]
        startTime = time.perf_counter()
        w, mu, nbGenes, loss = primal_dual_L1N(Xtrain, Ytrain, k, param)[0:4]
        endTime = time.perf_counter()
        timeElapsed = endTime - startTime
        print("-> Completed.\n-> Time Elapsed:{:.4}s".format(timeElapsed))
        W0[:, :, i] = w
        mu0[:, :, i] = mu
        loss_iter0[i, :] = loss
        # ========== Prediction =========
        Ypred, conf = predict_L1_molecule(Xtest, w, mu)
        Yprediction.append(Ypred)
        Confidence.append(conf)
        ID.append(test_ind)
        Ident.append(ref.iloc[test_ind])
        nbG[i] = nbGenes
        print("{:-<30}".format(""))
    # end kfold loop
    return Yprediction, Confidence, ID, Ident, YR, ref
# ===================== Base Launch functions (scripts) ========================
def basic_run_eta_compare(
    func_algo,
    func_predict,
    X,
    YR,
    k,
    alglist,
    genenames=None,
    clusternames=None,
    niter=30,
    rho=1,
    tau=4,
    beta=0.25,
    delta=1.0,
    eta=None,
    eta_star=None,
    gamma=1,
    nfold=4,
    rng=1,
    showres=False,
    keepfig=False,
    saveres=False,
    outputPath="../results/",
):
    """
    # =====================================================================
    # Basic function to launch the algorithm of some specific parameters,
    # and compare it against the baseline classifiers in *alglist*.
    # - Input:
    #   - func_algo (necessary)      : The function of the algorithm
    #   - func_predict (necessary)   : The function to predict
    #   - X (necessary)              : The data
    #   - YR (necessary)             : The labels for the data
    #   - k (necessary)              : The number of the clusters
    #
    #   - genenames (optional)       : The names of the features of the data
    #                                  if not given, it will be
    #                                   ['Gene 1','Gene 2',...]
    #
    #   - clusternames (optional)    : The clusternames of the data
    #                                  if not given, it will be
    #                                   ['Class 1', 'Class 2',...]
    #
    #   - niter (optional)           : The number of iterations
    #
    #   - rho, tau, beta, delta,     : The hyper-parameters for the algo
    #     eta, gamma, etc (optional)
    #
    #   - nfold (optional)           : The number of the folds of the cross validation
    #
    #   - rng (optional)             : The seed to control the random funcion
    #
    #   - showres (optional)         : Boolean value. True if we want to show
    #                                   the results, plot the figures etc.
    #
    #   - saveres (optional)         : Boolean value. True to save the results
    #
    #   - alglist (optional)         : Baseline classifiers to compare against
    #                                  ('svm', 'RF', 'plsda')
    #
    #   - outputPath (optional)      : String value. The output path.
    #
    # - Output:
    #   - mu                         : The centroids
    #   - nbm                        : Number of genes
    #   - accG                       : Global accuracy
    #   - loss                       : Loss for each iterations
    #   - W_mean                     : Mean weight matrix for all folds
    #   - timeElapsed                : Time elapsed for one fold
    #   - (And the tables)           : df_topGenes, df_normW, df_topG_normW,
    #                                  df_topGenes_mean, df_normW_mean,
    #                                  df_topG_normW_mean, df_acctest
    # ======================================================================
    """
    np.random.seed(rng)  # reproducible
    if not os.path.exists(outputPath):  # make the directory if it does not exist
        os.makedirs(outputPath)
    n, d = X.shape
    # parameter checking
    if genenames is None:
        genenames = ["Gene {}".format(i + 1) for i in range(d)]
    if clusternames is None:
        clusternames = ["Class {}".format(i + 1) for i in range(k)]
    # Normalize the mean of datas (Deprecated)
    # m = np.mean(X,axis=0)
    # X = X-m
    # normX = normest(X)
    # X = X/normX
    # YR = np.array(YR).reshape(-1,1)
    if YR.ndim == 1:  # In case that OneHotEncoder get 1D array and raise a TypeError
        YR = YR.reshape(-1, 1)
    Y = OneHotEncoder(categories="auto").fit_transform(YR).toarray()
    normY = normest(Y)
    normY2 = normY ** 2
    # Dropping the cells randomly if the n%d is not zero
    # For more details please see instructions in drop_cells
    X, YR = drop_cells(X, YR, nfold)
    param = {}
    param["niter"] = niter
    param["rho"] = rho
    param["tau"] = tau
    tau2 = beta * (1 / (np.sqrt(n) * normY))
    param["tau2"] = tau2
    eps = 1 / (1 + tau2 * rho * 0.25)
    sigma = 1.0 / (tau + (tau2 * eps * normY2))  # Converge until 2.6 for L1Nel
    param["sigma"] = sigma
    param["delta"] = delta
    param["beta"] = beta
    param["eta"] = eta
    param["eta_star"] = eta_star
    param["gamma"] = gamma
    # Initialization
    nbG = np.zeros(nfold, dtype=int)  # Number of genes for each fold
    accuracy_train = np.zeros((nfold, k + 1))
    accuracy_test = np.zeros((nfold, k + 1))
    auc_train = np.zeros((nfold))
    auc_test = np.zeros((nfold))
    sil_train = np.zeros((nfold))
    W0 = np.zeros((d, k, nfold))  # w in each fold
    mu0 = np.zeros((k, k, nfold))
    W_mean = np.zeros((d, k))
    # Z0 = np.zeros((int((nfold-1)*n/nfold),k,nfold))
    # Z_mean = np.zeros((int((nfold-1)*n/nfold),k))
    loss_iter0 = np.zeros((nfold, niter))  # loss for each iteration of each fold
    # W_mean stores w for each eta, where w is the mean of W0 along its third axis
    # NOTE(review): this float re-init makes the dtype=int nbG above dead code.
    nbG = np.zeros(nfold)
    # Parameters printing
    # print('\nStarts trainning for')
    # print('{:>6}:{:<6}'.format('niter',niter))
    Y_PDS = np.zeros(YR.shape)
    meanclassi = np.zeros(nfold)
    kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
    numalg = len(alglist)
    # Per-fold metrics for the baseline classifiers; each alg gets 4 columns
    # in the AUC tables: AUC, precision, recall, F1.
    accuracy_train_comp = np.zeros((nfold, numalg))
    accuracy_test_comp = np.zeros((nfold, numalg))
    AUC_train_comp = np.zeros((nfold, numalg * 4))
    AUC_test_comp = np.zeros((nfold, numalg * 4))
    timeElapsedMatrix = np.zeros((nfold, numalg + 1))
    w_all, mu_all, nbGenes_all, loss_all = func_algo(X, YR, k, param)[0:4]
    # 4-flod cross validation
    for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
        print("{:-<30}".format(""))
        print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
        # ========== Training =========
        Xtrain = X[train_ind]
        Xtest = X[test_ind]
        Ytrain = YR[train_ind]
        Ytest = YR[test_ind]
        # NOTE(review): Ytr/Yte appear unused below, and .T.T is a no-op —
        # candidates for removal.
        Ytr = pd.get_dummies(Ytrain.ravel()).values.T.T
        Yte = pd.get_dummies(Ytest.ravel())
        startTime = time.perf_counter()
        w, mu, nbGenes, loss = func_algo(Xtrain, Ytrain, k, param)[0:4]
        endTime = time.perf_counter()
        timeElapsed = endTime - startTime
        # Last column of timeElapsedMatrix is reserved for the primal-dual algo.
        timeElapsedMatrix[i][numalg] = timeElapsed
        print("-> Time Elapsed:{:.4}s".format(timeElapsed))
        W0[:, :, i] = w
        mu0[:, :, i] = mu
        # Z0[:,:,i] = Z
        loss_iter0[i, :] = loss
        # ========== Accuracy =========
        Ytrain_pred = func_predict(Xtrain, w, mu)
        Ytest_pred = func_predict(Xtest, w, mu)
        accuracy_train[i, 0], accuracy_train[i, 1 : k + 1] = compute_accuracy(
            Ytrain, Ytrain_pred, k
        )
        accuracy_test[i, 0], accuracy_test[i, 1 : k + 1] = compute_accuracy(
            Ytest, Ytest_pred, k
        )
        # AUC is only defined here for binary problems where both classes appear.
        if (
            np.unique(Ytest).shape[0] == 2
            and np.unique(Ytest_pred.astype("int64")).shape[0] == 2
        ):
            auc_test[i] = roc_auc_score(Ytest_pred.astype("int64"), Ytest)
            auc_train[i] = roc_auc_score(Ytrain_pred.astype("int64"), Ytrain)
        meanclassi[i] = np.mean(accuracy_test[i, 1 : k + 1])
        nbG[i] = nbGenes
        Y_PDS[test_ind] = Ytest_pred
        # start loop of other algorithms' comparison
        for j in range(numalg):
            alg = alglist[j]
            if alg == "svm":
                tuned_parameters = [
                    {"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]},
                    {"kernel": ["linear"], "C": [1, 10, 100, 1000]},
                ]
                clf = GridSearchCV(SVC(), tuned_parameters)
                # clf = SVC(probability=True,kernel='linear')
            if alg == "RF":
                clf = RandomForestClassifier(
                    n_estimators=400, random_state=10, max_depth=3
                )
            if alg == "plsda":
                clf = PLSRegression(n_components=4, scale=False)
            # build the model
            startTime = time.perf_counter()
            # clf = OneVsRestClassifier(clf)
            model = clf.fit(Xtrain, Ytrain.ravel())
            # model = clf.fit(X,Ytr)
            # if (alg == 'svm'):
            #     print(clf.best_params_)
            endTime = time.perf_counter()
            timeElapsedMatrix[i][j] = endTime - startTime
            if k > 2:
                Ypred_test = np.around(
                    model.predict(Xtest)
                ).ravel()  # getPredLabel(model.predict(Xtest))
                Ypred_train = np.around(
                    model.predict(Xtrain)
                ).ravel()  # getPredLabel(model.predict(Xtrain))
            else:
                Ypred_test = getPredLabel(model.predict(Xtest))
                Ypred_train = getPredLabel(model.predict(Xtrain))
            accuracy_test_comp[i][j] = accuracy_score(Ypred_test.astype("int64"), Ytest)
            accuracy_train_comp[i][j] = accuracy_score(
                Ypred_train.astype("int64"), Ytrain
            )
            # print("sil = ", metrics.silhouette_score(model.x_scores_, Ypred_train) )
            if alg == "plsda":
                sil_train[i] = metrics.silhouette_score(model.x_scores_, Ypred_train)
            if (
                np.unique(Ytest).shape[0] == 2
                and np.unique(Ypred_test.astype("int64")).shape[0] == 2
            ):
                AUC_test_comp[i][j * 4] = roc_auc_score(
                    Ypred_test.astype("int64"), Ytest
                )
                AUC_train_comp[i][j * 4] = roc_auc_score(
                    Ypred_train.astype("int64"), Ytrain
                )
                # F1 precision recal
                AUC_train_comp[i][
                    j * 4 + 1 : j * 4 + 4
                ] = metrics.precision_recall_fscore_support(
                    Ytrain, Ypred_train.astype("int64"), average="macro"
                )[
                    :-1
                ]
                AUC_test_comp[i][
                    j * 4 + 1 : j * 4 + 4
                ] = metrics.precision_recall_fscore_support(
                    Ytest, Ypred_test.astype("int64"), average="macro"
                )[
                    :-1
                ]
    # end kfold loop
    nbm = int(nbG.mean())
    accG = np.mean(accuracy_test[:, 0], axis=0)
    # NOTE(review): Meanclass and normfro below are computed but never used or returned.
    Meanclass = meanclassi.mean()
    W_mean = np.mean(W0, axis=2)
    mu_mean = np.mean(mu0, axis=2)
    # Z_mean= np.mean(Z0,axis=2)
    normfro = np.linalg.norm(w, "fro")
    # Class size
    Ctab = []
    size_class = np.zeros(k)  # Size of each class (real)
    size_class_est = np.zeros(k)  # Size of each class (estimated)
    for j in range(k):
        size_class[j] = (YR == (j + 1)).sum()
        size_class_est[j] = (Y_PDS == (j + 1)).sum()
        Ctab.append("Class {}".format(j + 1))
    df_szclass = pd.DataFrame(size_class, index=Ctab, columns=["Class Size"])
    df_szclass_est = pd.DataFrame(size_class_est, index=Ctab, columns=["Class Size"])
    # Data accuracy
    accuracy_train = np.vstack((accuracy_train, np.mean(accuracy_train, axis=0)))
    accuracy_test = np.vstack((accuracy_test, np.mean(accuracy_test, axis=0)))
    # auc_train = np.vstack((auc_train,np.mean(auc_train,axis=0)))
    # auc_test = np.vstack((auc_test,np.mean(auc_test,axis=0)))
    ind_df = []
    for i_fold in range(nfold):
        ind_df.append("Fold {}".format(i_fold + 1))
    ind_df.append("Mean")
    columns = ["Global"]
    if clusternames is None:
        columns += Ctab
    else:
        columns += clusternames
    df_accTrain = pd.DataFrame(accuracy_train, index=ind_df, columns=columns)
    df_acctest = pd.DataFrame(accuracy_test, index=ind_df, columns=columns)
    # Data accuracy1
    ind_df_comp = []
    for i_fold in range(nfold):
        ind_df_comp.append("Fold {}".format(i_fold + 1))
    df_comp = pd.DataFrame(accuracy_test_comp, index=ind_df_comp, columns=alglist)
    df_comp.loc["Mean"] = df_comp.mean()
    df_comp["pd"] = df_acctest["Global"]
    colauc = []
    for met in alglist:
        colauc.append(met + " AUC")
        colauc.append(met + " Precision")
        colauc.append(met + " Recall")
        colauc.append(met + " F1 score")
    df_compauc = pd.DataFrame(AUC_test_comp, index=ind_df_comp, columns=colauc)
    df_compauc["pd"] = auc_test
    df_compauc["sil_plsda"] = sil_train
    df_compauc.loc["Mean"] = df_compauc.mean()
    alglen = len(alglist)
    alglist1 = []
    for i in range(alglen):
        alglist1.append(alglist[i])
    alglist1.append("pd")
    df_timeElapsed = pd.DataFrame(
        timeElapsedMatrix, index=ind_df_comp, columns=alglist1
    )
    df_timeElapsed.loc["Mean"] = df_timeElapsed.mean()
    # Feature selection
    print("Selecting features from whole dataset...", end="")
    w, mu, nbGenes, loss = func_algo(X, YR, k, param)[0:4]
    topGenes, normW = select_feature_w(w, genenames)
    topGenes_mean, normW_mean = select_feature_w(W_mean, genenames)
    # Mean of each fold
    df_topGenes_mean = pd.DataFrame(topGenes_mean, columns=clusternames)
    df_normW_mean = pd.DataFrame(normW_mean, columns=clusternames)
    df_topG_normW_mean = merge_topGene_norm(topGenes_mean, normW_mean, clusternames)
    # All data
    df_topGenes = pd.DataFrame(topGenes, columns=clusternames)
    df_normW = pd.DataFrame(normW, columns=clusternames)
    df_topG_normW = merge_topGene_norm(topGenes, normW, clusternames)
    print("Completed.\n")
    # Two heatmaps
    # M_heatmap_classification = heatmap_classification(Y_PDS,YR,clusternames,rotate=60)
    # M_heatmap_signature = heatmap_normW(normW,clusternames,nbr_l=30,rotate=60)
    # Results
    if showres == True:
        print("Size class (real):")
        print(df_szclass)
        print("\nSize class (estimated):")
        print(df_szclass_est)
        print("\nAccuracy Train")
        print(df_accTrain)
        print("\nAccuracy Test")
        print(df_acctest)
        if keepfig == False:
            plt.close("all")
        fig_lossIter = plt.figure(figsize=(8, 6))
        plt.plot(np.arange(niter, dtype=int) + 1, loss)
        msg_eta = "$\eta$:%d" % eta if eta is not None else ""
        msg_etaS = "$\eta*$:%d" % eta_star if eta_star is not None else ""
        plt.title(
            "loss for each iteration {} {}\n ({})".format(
                msg_eta, msg_etaS, func_algo.__name__
            ),
            fontsize=18,
        )
        plt.ylabel("Loss", fontsize=18)
        plt.xlabel("Iteration", fontsize=18)
        plt.xticks(np.linspace(1, niter, num=6, endpoint=True, dtype=int))
        plt.xlim(left=1, right=niter)
        plt.ylim((0, 1))
    # Saving Result
    if saveres == True:
        # define two nametags
        nametag_eta = "_eta-%d" % eta if eta is not None else ""
        nametag_etaS = "_etaStar-%d" % eta_star if eta_star is not None else ""
        # save loss
        # filename_loss = 'loss_{}_beta-{}_delta-{}{}{}_niter-{}.txt'.format(func_algo.__name__,beta,delta, nametag_eta,nametag_etaS,niter)
        # np.savetxt(outputPath + filename_loss,loss)
        # define function name tag for two heatmaps
        # func_tag = func_algo.__name__ + nametag_eta + nametag_etaS
        # Save heatmaps
        # filename_heat = '{}{}_Heatmap_of_confusion_Matrix.npy'.format(outputPath,func_tag)
        # np.save(filename_heat,M_heatmap_classification)
        # filename_heat = '{}{}_Heatmap_of_signature_Matrix.npy'.format(outputPath,func_tag)
        # np.save(filename_heat,M_heatmap_signature)
        df_acctest.to_csv(
            "{}{}{}{}_AccuracyTest.csv".format(
                outputPath, func_algo.__name__, nametag_eta, nametag_etaS
            ),
            sep=";",
        )
        df_topG_normW.to_csv(
            "{}{}{}{}_TopGenesAndNormW.csv".format(
                outputPath, func_algo.__name__, nametag_eta, nametag_etaS
            ),
            sep=";",
        )
    # Other possiblilities to save
    # fig_lossIter.savefig('{}{}{}{}_niter-{}_loss_iters.png'.format(outputPath,func_algo.__name__,nametag_eta,nametag_etaS,niter))
    # All data
    # df_topGenes.to_csv('{}{}_TopGenes.csv'.format(outputPath,func_algo.__name__),sep=';')
    # df_normW.to_csv('{}{}_NormW.csv'.format(outputPath,func_algo.__name__),sep=';')
    # Mean of each fold
    # df_topGenes_mean.to_csv('{}{}_TopGenes_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
    # df_normW_mean.to_csv('{}{}_NormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
    # df_topG_normW_mean.to_csv('{}{}_TopGenesAndNormW_mean.csv'.format(outputPath,func_algo.__name__),sep=';')
    return (
        mu_mean,
        nbm,
        accG,
        loss,
        W_mean,
        timeElapsed,
        df_topGenes,
        df_normW,
        df_topG_normW,
        df_topGenes_mean,
        df_normW_mean,
        df_topG_normW_mean,
        df_acctest,
        df_comp,
        df_timeElapsed,
        w_all,
        df_compauc,
    )
import warnings
def readData(filename):
    """Load a gene-expression CSV from the ``datas/`` directory.

    Expected layout (as in e.g. LUNG.csv): a "Name" column whose first entry
    labels the class row and whose remaining entries are gene names; the first
    data row holds each sample's class label; every following row is one gene,
    with samples as columns. The file is semicolon-separated, uses a comma as
    decimal mark, and is ISO-8859-1 encoded.

    Parameters
    ----------
    filename : str
        File name inside ``datas/``.

    Returns
    -------
    X : ndarray, shape (n_samples, n_features)
        Expression matrix (CSV columns transposed to rows).
    Yr : ndarray of int64
        Numeric class label per sample.
    nbr_clusters : int
        Number of distinct labels found in the label row.
    feature_names : ndarray of str
        Gene names ("Name" column minus its first, label-row entry).
    """
    DATADIR = "datas/"
    # df_X = pd.read_csv(DATADIR+'LUNG.csv',delimiter=';', decimal=",",header=0,encoding="ISO-8859-1", low_memory=False)
    df_X = pd.read_csv(
        DATADIR + str(filename),
        delimiter=";",
        decimal=",",
        header=0,
        encoding="ISO-8859-1",
        low_memory=False,
    )
    df_names = df_X["Name"]
    X = df_X.iloc[1:, 1:].values.astype(float).transpose()
    Yr = df_X.iloc[0, 1:].values.astype(float)
    nbr_clusters = len(np.unique(Yr))
    # Fix: the original computed feature_names twice; the first assignment
    # (df_names[1:].values.astype(str)) was dead code, overwritten here.
    feature_names = df_names.values.astype(str)[1:]
    label_name = df_names
    for index, label in enumerate(
        label_name
    ):  # convert string labels to numero (0,1,2....)
        # NOTE(review): this iterates over *all* entries of the "Name" column
        # (gene names included), relying on `Yr == label` matching nothing for
        # non-label entries — confirm against the input files.
        with warnings.catch_warnings():
            warnings.simplefilter(action="ignore", category=FutureWarning)
            Yr = np.where(Yr == label, index, Yr)
    Yr = Yr.astype(np.int64)
    return X, Yr, nbr_clusters, feature_names
def basic_run_other(
    X,
    YR,
    k,
    alglist,
    genenames=None,
    clusternames=None,
    nfold=4,
    rng=6,
    doTopGenes=False,
):
    """Benchmark a list of off-the-shelf classifiers with k-fold CV.

    For every algorithm name in ``alglist`` ("svm", "RF", "plsda", "logreg",
    "NN", "GaussianNB", "Adaboost", "Lasso"), fits the model on each of
    ``nfold`` folds of (X, YR) and records train/test accuracy, AUC,
    precision/recall/F1 and fit time.

    Parameters
    ----------
    X : ndarray, shape (n, d)
        Feature matrix (cells may be dropped by ``drop_cells`` so that n is
        divisible by ``nfold``).
    YR : ndarray
        Labels; reshaped to 2-D if given as 1-D.
    k : int
        Number of classes; for k > 2 predictions are rounded instead of
        thresholded by ``getPredLabel``.
    alglist : list of str
        Algorithm identifiers to compare (see above).
    genenames, clusternames : list of str, optional
        Display names; generated as "Gene i" / "Class i" when None.
    nfold : int
        Number of cross-validation folds.
    rng : int
        Seed for numpy and the KFold shuffle.
    doTopGenes : bool
        When True, rank features (from the first fold only) per algorithm.

    Returns
    -------
    (df_comp, df_timeElapsed, df_compauc, top_features_list)
        Per-fold test accuracies, fit times, AUC/precision/recall/F1 table
        (plus a PLS-DA silhouette column), and the feature rankings.
    """
    np.random.seed(rng)  # reproducible
    n, d = X.shape  # n is never used
    # parameter checking
    if genenames is None:
        genenames = ["Gene {}".format(i + 1) for i in range(d)]
    if clusternames is None:
        clusternames = ["Class {}".format(i + 1) for i in range(k)]
    if YR.ndim == 1:  # In case that OneHotEncoder get 1D array and raise a TypeError
        YR = YR.reshape(-1, 1)
    # Dropping the cells randomly if the n%d is not zero
    # For more details please see instructions in drop_cells
    X, YR = drop_cells(X, YR, nfold)
    # Initialization
    sil_train = np.zeros((nfold))
    kf = KFold(n_splits=nfold, random_state=rng, shuffle=True)
    numalg = len(alglist)
    accuracy_train_comp = np.zeros((nfold, numalg))
    accuracy_test_comp = np.zeros((nfold, numalg))
    AUC_train_comp = np.zeros((nfold, numalg * 4))
    AUC_test_comp = np.zeros((nfold, numalg * 4))
    timeElapsedMatrix = np.zeros((nfold, numalg))
    top_features_list = []
    # 4-flod cross validation
    for i, (train_ind, test_ind) in enumerate(kf.split(YR)):
        print("{:-<30}".format(""))
        print("{message:^6} {f1} / {f2}".format(message="fold", f1=i + 1, f2=nfold))
        # ========== Training =========
        Xtrain = X[train_ind]
        Xtest = X[test_ind]
        Ytrain = YR[train_ind]
        Ytest = YR[test_ind]
        # start loop of other algorithms' comparison
        for j, alg in enumerate(alglist):
            # get_features extracts a feature-importance vector from the
            # fitted model; defaults to None for models without one.
            get_features = lambda m: None
            if alg == "svm":
                tuned_parameters = [
                    {"kernel": ["linear"], "C": [1, 10, 100, 1000]},
                ]
                clf = GridSearchCV(SVC(), tuned_parameters)
                get_features = lambda m: m.best_estimator_.coef_.transpose()
            if alg == "RF":
                clf = RandomForestClassifier(
                    n_estimators=400, random_state=10, max_depth=3
                )
                get_features = lambda m: m.feature_importances_
            if alg == "plsda":
                clf = PLSRegression(n_components=4, scale=False)
                get_features = lambda m: m.coef_
            if alg == "logreg":
                clf = LogisticRegression(C=10)
                get_features = lambda m: m.coef_.transpose()
            if alg == "NN":
                clf = KNeighborsClassifier(n_neighbors=50)
            if alg == "GaussianNB":
                clf = GaussianNB(var_smoothing=1e-9)  # var smoothing to be tuned
            if alg == "Adaboost":
                clf = AdaBoostClassifier(n_estimators=100)  # parameters to be tuned
                get_features = lambda m: m.feature_importances_
            if alg == "Lasso":
                lasso = Lasso(random_state=0, max_iter=10000)
                alphas = np.logspace(-4, -0.5, 20)
                tuned_parameters = [{"alpha": alphas}]
                n_folds = 5
                clf = GridSearchCV(lasso, tuned_parameters, cv=n_folds)
                get_features = lambda m: m.best_estimator_.coef_
            # build the model
            startTime = time.perf_counter()
            model = clf.fit(Xtrain, Ytrain.ravel())
            endTime = time.perf_counter()
            timeElapsedMatrix[i][j] = endTime - startTime
            # Regressors (plsda, Lasso) emit continuous outputs: round them
            # for multiclass, or threshold via getPredLabel for binary.
            if k > 2:
                Ypred_test = np.around(
                    model.predict(Xtest)
                ).ravel()  # getPredLabel(model.predict(Xtest))
                Ypred_train = np.around(
                    model.predict(Xtrain)
                ).ravel()  # getPredLabel(model.predict(Xtrain))
            else:
                Ypred_test = getPredLabel(model.predict(Xtest)).ravel()
                Ypred_train = getPredLabel(model.predict(Xtrain)).ravel()
            accuracy_test_comp[i][j] = accuracy_score(Ypred_test.astype("int64"), Ytest)
            accuracy_train_comp[i][j] = accuracy_score(
                Ypred_train.astype("int64"), Ytrain
            )
            if alg == "plsda":
                sil_train[i] = metrics.silhouette_score(model.x_scores_, Ypred_train)
            # AUC only when both truth and prediction are genuinely binary.
            # NOTE(review): arguments appear swapped vs. the sklearn
            # (y_true, y_score) convention — confirm before relying on AUC.
            if (
                np.unique(Ytest).shape[0] == 2
                and np.unique(Ypred_test.astype("int64")).shape[0] == 2
            ):
                AUC_test_comp[i][j * 4] = roc_auc_score(
                    Ypred_test.astype("int64"), Ytest
                )
                AUC_train_comp[i][j * 4] = roc_auc_score(
                    Ypred_train.astype("int64"), Ytrain
                )
            # F1 precision recall
            # Note: for some models, these are not defined
            # (for example, the Lasso)
            # In those cases, the undefined scores are set to 0,
            # And no warning is raised
            # Cf. the zero_division=0 parameter.
            AUC_train_comp[i][
                j * 4 + 1 : j * 4 + 4
            ] = metrics.precision_recall_fscore_support(
                Ytrain, Ypred_train.astype("int64"), average="macro", zero_division=0
            )[
                :-1
            ]
            AUC_test_comp[i][
                j * 4 + 1 : j * 4 + 4
            ] = metrics.precision_recall_fscore_support(
                Ytest, Ypred_test.astype("int64"), average="macro", zero_division=0
            )[
                :-1
            ]
            # get the topgenes from the first fold
            if i == 0 and doTopGenes:
                coef = get_features(clf)
                if coef is not None:
                    df_rankFeature = rankFeatureHelper(alg, coef, genenames)
                else:
                    df_rankFeature = rankFeatureHelper(
                        alg, [0] * len(genenames), genenames
                    )
                top_features_list.append(df_rankFeature)
    # Data accuracy1
    ind_df_comp = []
    for i_fold in range(nfold):
        ind_df_comp.append("Fold {}".format(i_fold + 1))
    df_comp = pd.DataFrame(accuracy_test_comp, index=ind_df_comp, columns=alglist)
    df_comp.loc["Mean"] = df_comp.mean()
    colauc = []
    for met in alglist:
        colauc.append(met + " AUC")
        colauc.append(met + " Precision")
        colauc.append(met + " Recall")
        colauc.append(met + " F1 score")
    df_compauc = pd.DataFrame(AUC_test_comp, index=ind_df_comp, columns=colauc)
    df_compauc["sil_plsda"] = sil_train
    df_compauc.loc["Mean"] = df_compauc.mean()
    alglen = len(alglist)
    alglist1 = []
    for i in range(alglen):
        alglist1.append(alglist[i])
    df_timeElapsed = pd.DataFrame(
        timeElapsedMatrix, index=ind_df_comp, columns=alglist1
    )
    df_timeElapsed.loc["Mean"] = df_timeElapsed.mean()
    return df_comp, df_timeElapsed, df_compauc, top_features_list
def basic_run_tabeta(
    func_algo,
    func_predict,
    X,
    YR,
    k,
    genenames=None,
    clusternames=None,
    niter=30,
    rho=1,
    tau=1.5,
    beta=0.25,
    delta=1.0,
    tabeta=[100, 200, 400],
    gamma=1,
    nfold=4,
    rng=1,
    showres=True,
    saveres=False,
    keepfig=False,
    outputPath="../results/",
):
    """
    It shares the same input as function basic_run_eta except that the eta is
    replaced by tabeta. It has also all the output of that of the basic_run_eta
    but it has its own 5 more output:
    nbm_etas,accG_etas,loss_iter,W_mean_etas,timeElapsed_etas
    Note : For now the function will save the output, show the figures and save the
    results for only the last experiment, i.e. only for the last eta.
    This mechanism will be changed in the future update.
    """
    n_etas = len(tabeta)
    n, d = X.shape
    # W_mean_etas stores w for each eta, where w is the mean of W0 along its third axis
    W_mean_etas = np.zeros((d, k, n_etas))
    loss_iter = np.zeros((n_etas, niter))  # loss for each iteration of each eta
    nbm_etas = np.zeros(n_etas, dtype=int)
    accG_etas = np.zeros(n_etas)
    timeElapsed_etas = np.zeros(n_etas)
    for i, eta in enumerate(tabeta):
        if i == (n_etas - 1):
            # Last eta: keep the full basic_run_eta output (incl. gene
            # tables) and honor the caller's saveres/keepfig settings.
            (
                mu,
                nbm,
                accG,
                loss,
                W_mean,
                timeElapsed,
                topGenes,
                normW,
                topGenes_normW,
                topGenes_mean,
                normW_mean,
                topGenes_normW_mean,
                acctest,
            ) = basic_run_eta(
                func_algo,
                func_predict,
                X,
                YR,
                k,
                genenames,
                clusternames,
                eta=eta,
                niter=niter,
                rho=rho,
                tau=tau,
                beta=beta,
                delta=delta,
                gamma=gamma,
                nfold=nfold,
                rng=rng,
                showres=True,
                saveres=saveres,
                keepfig=keepfig,
                outputPath=outputPath,
            )
        else:
            # Earlier etas: only outputs 1..5 (nbm, accG, loss, W_mean,
            # timeElapsed) are needed; figures and saving are suppressed.
            nbm, accG, loss, W_mean, timeElapsed = basic_run_eta(
                func_algo,
                func_predict,
                X,
                YR,
                k,
                genenames,
                clusternames,
                eta=eta,
                niter=niter,
                rho=rho,
                tau=tau,
                beta=beta,
                delta=delta,
                gamma=gamma,
                nfold=nfold,
                rng=rng,
                showres=False,
                saveres=False,
                outputPath=outputPath,
            )[1:6]
        nbm_etas[i] = nbm
        accG_etas[i] = accG
        loss_iter[i, :] = loss
        W_mean_etas[:, :, i] = W_mean
        timeElapsed_etas[i] = timeElapsed
    # Plot accuracy as a function of the number of selected genes.
    if showres == True:
        file_tag = func_algo.__name__
        fig_avn = plt.figure(figsize=(8, 6))
        plt.plot(nbm_etas, accG_etas, "bo-", linewidth=3)
        plt.title(
            "Figure: Accuracy VS Number of genes \n({})".format(file_tag), fontsize=16
        )
        plt.ylabel("Accuracy", fontsize=16)
        plt.xlabel("Number of genes", fontsize=16)
        plt.xlim([min(nbm_etas), max(nbm_etas)])
    # if saveres == True:
    # fig_avn.savefig('{}{}_AccVSNbG.png'.format(outputPath,file_tag))
    # Wrap the sweep-level arrays into DataFrames indexed by eta.
    nbm_etas = pd.DataFrame(nbm_etas, index=tabeta)
    accG_etas = pd.DataFrame(accG_etas, index=tabeta)
    loss_iter = pd.DataFrame(
        loss_iter,
        index=tabeta,
        columns=np.linspace(1, niter, niter, endpoint=True, dtype=int),
    ).transpose()
    timeElapsed_etas = pd.DataFrame(timeElapsed_etas, index=tabeta)
    if saveres:
        nbm_etas.to_csv(
            "{}{}_Num_Features.csv".format(outputPath, func_algo.__name__), sep=";"
        )
        accG_etas.to_csv(
            "{}{}_Acc_tabEta.csv".format(outputPath, func_algo.__name__), sep=";"
        )
    return (
        mu,
        nbm,
        accG,
        loss,
        W_mean,
        timeElapsed,
        topGenes,
        normW,
        topGenes_normW,
        topGenes_mean,
        normW_mean,
        topGenes_normW_mean,
        acctest,
        nbm_etas,
        accG_etas,
        loss_iter,
        W_mean_etas,
        timeElapsed_etas,
    )
# ================================== Part 3 ====================================
# ===================== Exact algos ===========================
def run_FISTA_eta(
    X,
    YR,
    k,
    genenames=None,
    clusternames=None,
    niter=30,
    eta=500,
    beta=0.25,
    delta=1.0,
    gamma=1.0,
    nfold=4,
    showres=False,
    saveres=False,
    keepfig=False,
    outputPath="../results/",
):
    """Run basic_run_eta with the FISTA primal solver and its predictor.

    Every argument is forwarded unchanged to basic_run_eta; see that
    function for parameter meanings and the returned tuple.
    """
    forwarded = {
        "niter": niter,
        "nfold": nfold,
        "beta": beta,
        "delta": delta,
        "eta": eta,
        "gamma": gamma,
        "showres": showres,
        "saveres": saveres,
        "keepfig": keepfig,
        "outputPath": outputPath,
    }
    return basic_run_eta(
        FISTA_Primal, predict_FISTA, X, YR, k, genenames, clusternames, **forwarded
    )
def run_primal_dual_L1N_eta(
    X,
    YR,
    k,
    genenames=None,
    clusternames=None,
    niter=30,
    rho=1,
    tau=1.5,
    beta=0.25,
    delta=1.0,
    eta=500,
    nfold=4,
    random_seed=1,
    showres=True,
    saveres=False,
    keepfig=False,
    outputPath="../results/",
):
    """Run basic_run_eta with the L1-norm primal-dual solver.

    All arguments are forwarded unchanged to basic_run_eta (``random_seed``
    becomes its ``rng``); see that function for details and returns.
    """
    forwarded = {
        "niter": niter,
        "beta": beta,
        "delta": delta,
        "eta": eta,
        "rho": rho,
        "tau": tau,
        "nfold": nfold,
        "rng": random_seed,
        "showres": showres,
        "saveres": saveres,
        "keepfig": keepfig,
        "outputPath": outputPath,
    }
    return basic_run_eta(
        primal_dual_L1N, predict_L1, X, YR, k, genenames, clusternames, **forwarded
    )
def run_primal_dual_L1N_eta_compare(
    X,
    YR,
    k,
    alglist,
    genenames=None,
    clusternames=None,
    niter=30,
    rho=1,
    tau=1.5,
    beta=0.25,
    delta=1.0,
    eta=500,
    nfold=4,
    random_seed=1,
    showres=False,
    saveres=False,
    keepfig=False,
    outputPath="../results/",
):
    """Compare the L1-norm primal-dual solver against the algorithms in alglist.

    Thin wrapper around basic_run_eta_compare; every argument is forwarded
    unchanged (``random_seed`` becomes its ``rng``).
    """
    forwarded = {
        "niter": niter,
        "beta": beta,
        "delta": delta,
        "eta": eta,
        "rho": rho,
        "tau": tau,
        "nfold": nfold,
        "rng": random_seed,
        "showres": showres,
        "saveres": saveres,
        "keepfig": keepfig,
        "outputPath": outputPath,
    }
    return basic_run_eta_compare(
        primal_dual_L1N,
        predict_L1,
        X,
        YR,
        k,
        alglist,
        genenames,
        clusternames,
        **forwarded,
    )
def run_primal_dual_Nuclear_eta(
    X,
    YR,
    k,
    genenames=None,
    clusternames=None,
    niter=30,
    rho=1,
    tau=1.5,
    beta=0.25,
    delta=1.0,
    eta_star=500,
    nfold=4,
    random_seed=1,
    showres=True,
    saveres=False,
    keepfig=False,
    outputPath="../results/",
):
    """Run basic_run_eta with the nuclear-norm primal-dual solver.

    All arguments are forwarded unchanged to basic_run_eta (``random_seed``
    becomes its ``rng``); the sparsity target is ``eta_star``.
    """
    forwarded = {
        "niter": niter,
        "beta": beta,
        "delta": delta,
        "eta_star": eta_star,
        "rho": rho,
        "tau": tau,
        "nfold": nfold,
        "rng": random_seed,
        "showres": showres,
        "saveres": saveres,
        "keepfig": keepfig,
        "outputPath": outputPath,
    }
    return basic_run_eta(
        primal_dual_Nuclear, predict_L1, X, YR, k, genenames, clusternames, **forwarded
    )
def run_FISTA_tabeta(
    X,
    YR,
    k,
    genenames=None,
    clusternames=None,
    niter=30,
    tabeta=[100, 200, 300, 400, 500],
    gamma=1.0,
    nfold=4,
    random_seed=1,
    showres=True,
    saveres=False,
    keepfig=False,
    outputPath="../results/",
):
    """Sweep the FISTA primal solver over every eta value in ``tabeta``.

    Thin wrapper around basic_run_tabeta; every argument is forwarded
    unchanged (``random_seed`` becomes its ``rng``).
    """
    forwarded = {
        "niter": niter,
        "tabeta": tabeta,
        "gamma": gamma,
        "nfold": nfold,
        "rng": random_seed,
        "showres": showres,
        "saveres": saveres,
        "keepfig": keepfig,
        "outputPath": outputPath,
    }
    return basic_run_tabeta(
        FISTA_Primal, predict_FISTA, X, YR, k, genenames, clusternames, **forwarded
    )
def run_primal_dual_L1N_tabeta(
    X,
    YR,
    k,
    genenames=None,
    clusternames=None,
    niter=30,
    rho=1,
    tau=4,
    beta=0.25,
    nfold=4,
    delta=1.0,
    random_seed=1,
    tabeta=[10, 20, 50, 75, 100, 200, 300],
    showres=True,
    keepfig=False,
    saveres=False,
    outputPath="../results/",
):
    """Sweep the L1-norm primal-dual solver over every eta value in ``tabeta``.

    Thin wrapper around basic_run_tabeta; every argument is forwarded
    unchanged (``random_seed`` becomes its ``rng``).
    """
    forwarded = {
        "niter": niter,
        "tabeta": tabeta,
        "rho": rho,
        "tau": tau,
        "beta": beta,
        "nfold": nfold,
        "delta": delta,
        "rng": random_seed,
        "showres": showres,
        "saveres": saveres,
        "keepfig": keepfig,
        "outputPath": outputPath,
    }
    return basic_run_tabeta(
        primal_dual_L1N,
        predict_L1,
        X,
        YR,
        k,
        genenames=genenames,
        clusternames=clusternames,
        **forwarded,
    )
def run_primal_dual_Nuclear_tabEtastar(
    X,
    YR,
    k,
    genenames=None,
    clusternames=None,
    niter=30,
    rho=1,
    tau=1.5,
    beta=0.25,
    delta=1.0,
    tabEtastar=[100, 200, 400],
    gamma=1,
    nfold=4,
    rng=1,
    showres=True,
    saveres=False,
    keepfig=False,
    outputPath="../results/",
):
    """
    Sweep basic_run_eta with the nuclear-norm solver over tabEtastar.

    It shares the same input as function basic_run_eta except that eta_star
    takes each value of tabEtastar in turn. It returns everything
    basic_run_eta returns for the *last* eta_star, plus 4 sweep-level outputs:
    accG_etas, loss_iter, W_mean_etas, timeElapsed_etas.
    Note : For now the function will save the output, show the figures and save the
    results for only the last experiment, i.e. only for the last eta.
    This mechanism will be changed in the future update.
    """
    n_etas = len(tabEtastar)
    n, d = X.shape
    # W_mean_etas stores w for each eta, where w is the mean of W0 along its third axis
    W_mean_etas = np.zeros((d, k, n_etas))
    loss_iter = np.zeros((n_etas, niter))  # loss for each iteration of each eta
    # Fix: removed the dead `nbm_etas` array — it was allocated but never
    # filled in the loop and never returned (unlike in basic_run_tabeta).
    accG_etas = np.zeros(n_etas)
    timeElapsed_etas = np.zeros(n_etas)
    for i, eta in enumerate(tabEtastar):
        if i == (n_etas - 1):
            # Last eta_star: keep the full basic_run_eta output (incl. gene
            # tables) and honor the caller's saveres/keepfig settings.
            (
                mu,
                nbm,
                accG,
                loss,
                W_mean,
                timeElapsed,
                topGenes,
                normW,
                topGenes_normW,
                topGenes_mean,
                normW_mean,
                topGenes_normW_mean,
                acctest,
            ) = basic_run_eta(
                primal_dual_Nuclear,
                predict_L1,
                X,
                YR,
                k,
                genenames,
                clusternames,
                eta_star=eta,
                niter=niter,
                rho=rho,
                tau=tau,
                beta=beta,
                delta=delta,
                gamma=gamma,
                nfold=nfold,
                rng=rng,
                showres=True,
                saveres=saveres,
                keepfig=keepfig,
                outputPath=outputPath,
            )
        else:
            # Earlier eta_stars: only the first six outputs are needed;
            # figures and saving are suppressed.
            mu, nbm, accG, loss, W_mean, timeElapsed = basic_run_eta(
                primal_dual_Nuclear,
                predict_L1,
                X,
                YR,
                k,
                genenames,
                clusternames,
                eta_star=eta,
                niter=niter,
                rho=rho,
                tau=tau,
                beta=beta,
                delta=delta,
                gamma=gamma,
                nfold=nfold,
                rng=rng,
                showres=False,
                saveres=False,
                outputPath=outputPath,
            )[0:6]
        accG_etas[i] = accG
        loss_iter[i, :] = loss
        W_mean_etas[:, :, i] = W_mean
        timeElapsed_etas[i] = timeElapsed
    # Wrap the sweep-level arrays into DataFrames indexed by eta_star.
    accG_etas = pd.DataFrame(accG_etas, index=tabEtastar)
    loss_iter = pd.DataFrame(
        loss_iter,
        index=tabEtastar,
        columns=np.linspace(1, niter, niter, endpoint=True, dtype=int),
    ).transpose()
    timeElapsed_etas = pd.DataFrame(timeElapsed_etas, index=tabEtastar)
    if saveres:
        accG_etas.to_csv(
            "{}{}_Acc_tabEta.csv".format(outputPath, "primal_dual_Nuclear"), sep=";"
        )
    return (
        mu,
        nbm,
        accG,
        loss,
        W_mean,
        timeElapsed,
        topGenes,
        normW,
        topGenes_normW,
        topGenes_mean,
        normW_mean,
        topGenes_normW_mean,
        acctest,
        accG_etas,
        loss_iter,
        W_mean_etas,
        timeElapsed_etas,
    )
# =============================================================================
# Script entry point: this module is a function library, so running it
# directly only prints a notice and exits.
if __name__ == "__main__":
    print("This is just a storage file for functions. So... nothing happened.")
| [
"matplotlib.pyplot.title",
"numpy.absolute",
"sklearn.model_selection.GridSearchCV",
"numpy.sum",
"numpy.abs",
"numpy.random.seed",
"numpy.argmax",
"seaborn.heatmap",
"numpy.logspace",
"numpy.ones",
"numpy.argmin",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange"... | [((1465, 1482), 'numpy.reshape', 'np.reshape', (['Y', '(-1)'], {}), '(Y, -1)\n', (1475, 1482), True, 'import numpy as np\n'), ((1512, 1528), 'numpy.zeros', 'np.zeros', (['(k, d)'], {}), '((k, d))\n', (1520, 1528), True, 'import numpy as np\n'), ((2178, 2194), 'numpy.zeros', 'np.zeros', (['(n, k)'], {}), '((n, k))\n', (2186, 2194), True, 'import numpy as np\n'), ((2546, 2562), 'numpy.zeros', 'np.zeros', (['(d, 1)'], {}), '((d, 1))\n', (2554, 2562), True, 'import numpy as np\n'), ((5268, 5287), 'numpy.sum', 'np.sum', (['(idxR == idx)'], {}), '(idxR == idx)\n', (5274, 5287), True, 'import numpy as np\n'), ((5357, 5373), 'numpy.zeros', 'np.zeros', (['(1, k)'], {}), '((1, k))\n', (5365, 5373), True, 'import numpy as np\n'), ((5866, 5882), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {}), '((m, 1))\n', (5874, 5882), True, 'import numpy as np\n'), ((6411, 6427), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {}), '((m, 1))\n', (6419, 6427), True, 'import numpy as np\n'), ((6445, 6461), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {}), '((m, 1))\n', (6453, 6461), True, 'import numpy as np\n'), ((7365, 7451), 'numpy.linspace', 'np.linspace', ([], {'start': 'first_edge', 'stop': 'last_edge', 'num': '(n_equal_bins + 1)', 'endpoint': '(True)'}), '(start=first_edge, stop=last_edge, num=n_equal_bins + 1,\n endpoint=True)\n', (7376, 7451), True, 'import numpy as np\n'), ((7470, 7499), 'matplotlib.pyplot.hist', 'plt.hist', (['rho'], {'bins': 'bin_edges'}), '(rho, bins=bin_edges)\n', (7478, 7499), True, 'from matplotlib import pyplot as plt\n'), ((7504, 7546), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram of confidence score"""'], {}), "('Histogram of confidence score')\n", (7513, 7546), True, 'from matplotlib import pyplot as plt\n'), ((7551, 7561), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7559, 7561), True, 'from matplotlib import pyplot as plt\n'), ((7602, 7614), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7612, 7614), True, 'from 
matplotlib import pyplot as plt\n'), ((7633, 7645), 'numpy.dot', 'np.dot', (['X', 'W'], {}), '(X, W)\n', (7639, 7645), True, 'import numpy as np\n'), ((7675, 7692), 'numpy.where', 'np.where', (['(Yr == 1)'], {}), '(Yr == 1)\n', (7683, 7692), True, 'import numpy as np\n'), ((7738, 7758), 'numpy.mean', 'np.mean', (['X_1'], {'axis': '(0)'}), '(X_1, axis=0)\n', (7745, 7758), True, 'import numpy as np\n'), ((7850, 7867), 'numpy.where', 'np.where', (['(Yr == 2)'], {}), '(Yr == 2)\n', (7858, 7867), True, 'import numpy as np\n'), ((7913, 7933), 'numpy.mean', 'np.mean', (['X_2'], {'axis': '(0)'}), '(X_2, axis=0)\n', (7920, 7933), True, 'import numpy as np\n'), ((8115, 8169), 'matplotlib.pyplot.plot', 'plt.plot', (['X_1[:, 0]', 'X_1[:, 1]', '"""ob"""'], {'label': '"""cluster1"""'}), "(X_1[:, 0], X_1[:, 1], 'ob', label='cluster1')\n", (8123, 8169), True, 'from matplotlib import pyplot as plt\n'), ((8174, 8228), 'matplotlib.pyplot.plot', 'plt.plot', (['X_2[:, 0]', 'X_2[:, 1]', '"""^r"""'], {'label': '"""cluster2"""'}), "(X_2[:, 0], X_2[:, 1], '^r', label='cluster2')\n", (8182, 8228), True, 'from matplotlib import pyplot as plt\n'), ((8234, 8258), 'matplotlib.pyplot.title', 'plt.title', (['"""Primal_Dual"""'], {}), "('Primal_Dual')\n", (8243, 8258), True, 'from matplotlib import pyplot as plt\n'), ((8263, 8275), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8273, 8275), True, 'from matplotlib import pyplot as plt\n'), ((8280, 8290), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8288, 8290), True, 'from matplotlib import pyplot as plt\n'), ((8332, 8344), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8342, 8344), True, 'from matplotlib import pyplot as plt\n'), ((8406, 8425), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (8409, 8425), False, 'from sklearn.decomposition import PCA\n'), ((8508, 8525), 'numpy.where', 'np.where', (['(Yr == 1)'], {}), '(Yr == 1)\n', (8516, 8525), True, 'import numpy as 
np\n'), ((8566, 8586), 'numpy.mean', 'np.mean', (['X_1'], {'axis': '(0)'}), '(X_1, axis=0)\n', (8573, 8586), True, 'import numpy as np\n'), ((8678, 8695), 'numpy.where', 'np.where', (['(Yr == 2)'], {}), '(Yr == 2)\n', (8686, 8695), True, 'import numpy as np\n'), ((8736, 8756), 'numpy.mean', 'np.mean', (['X_2'], {'axis': '(0)'}), '(X_2, axis=0)\n', (8743, 8756), True, 'import numpy as np\n'), ((8998, 9052), 'matplotlib.pyplot.plot', 'plt.plot', (['X_1[:, 0]', 'X_1[:, 1]', '"""ob"""'], {'label': '"""cluster1"""'}), "(X_1[:, 0], X_1[:, 1], 'ob', label='cluster1')\n", (9006, 9052), True, 'from matplotlib import pyplot as plt\n'), ((9057, 9111), 'matplotlib.pyplot.plot', 'plt.plot', (['X_2[:, 0]', 'X_2[:, 1]', '"""^r"""'], {'label': '"""cluster2"""'}), "(X_2[:, 0], X_2[:, 1], '^r', label='cluster2')\n", (9065, 9111), True, 'from matplotlib import pyplot as plt\n'), ((9117, 9133), 'matplotlib.pyplot.title', 'plt.title', (['"""PCA"""'], {}), "('PCA')\n", (9126, 9133), True, 'from matplotlib import pyplot as plt\n'), ((9138, 9150), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9148, 9150), True, 'from matplotlib import pyplot as plt\n'), ((9155, 9165), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9163, 9165), True, 'from matplotlib import pyplot as plt\n'), ((9794, 9830), 'numpy.array', 'np.array', (["df_confidence['Yoriginal']"], {}), "(df_confidence['Yoriginal'])\n", (9802, 9830), True, 'import numpy as np\n'), ((9873, 9905), 'numpy.array', 'np.array', (["df_confidence['Ypred']"], {}), "(df_confidence['Ypred'])\n", (9881, 9905), True, 'import numpy as np\n'), ((9986, 10018), 'numpy.arange', 'np.arange', (['(0)', 'eps', '(eps / num_eps)'], {}), '(0, eps, eps / num_eps)\n', (9995, 10018), True, 'import numpy as np\n'), ((10397, 10409), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10407, 10409), True, 'from matplotlib import pyplot as plt\n'), ((10414, 10442), 'matplotlib.pyplot.plot', 'plt.plot', (['epsList', 'falseRate'], {}), 
'(epsList, falseRate)\n', (10422, 10442), True, 'from matplotlib import pyplot as plt\n'), ((10447, 10488), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Confidence score prediction"""'], {}), "('Confidence score prediction')\n", (10457, 10488), True, 'from matplotlib import pyplot as plt\n'), ((10493, 10520), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""FN+FP (ratio)"""'], {}), "('FN+FP (ratio)')\n", (10503, 10520), True, 'from matplotlib import pyplot as plt\n'), ((10568, 10580), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10578, 10580), True, 'from matplotlib import pyplot as plt\n'), ((10585, 10616), 'matplotlib.pyplot.plot', 'plt.plot', (['epsList', 'rejectSample'], {}), '(epsList, rejectSample)\n', (10593, 10616), True, 'from matplotlib import pyplot as plt\n'), ((10621, 10662), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Confidence score prediction"""'], {}), "('Confidence score prediction')\n", (10631, 10662), True, 'from matplotlib import pyplot as plt\n'), ((10667, 10705), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""" Reject samples (ratio) """'], {}), "(' Reject samples (ratio) ')\n", (10677, 10705), True, 'from matplotlib import pyplot as plt\n'), ((10718, 10737), 'numpy.array', 'np.array', (['falseRate'], {}), '(falseRate)\n', (10726, 10737), True, 'import numpy as np\n'), ((10935, 10951), 'numpy.zeros', 'np.zeros', (['(m, 1)'], {}), '((m, 1))\n', (10943, 10951), True, 'import numpy as np\n'), ((11685, 11702), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (11699, 11702), True, 'import numpy as np\n'), ((13493, 13521), 'numpy.dstack', 'np.dstack', (['(topGenes, normW)'], {}), '((topGenes, normW))\n', (13502, 13521), True, 'import numpy as np\n'), ((13716, 13750), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': 'lst_col'}), '(res, columns=lst_col)\n', (13728, 13750), True, 'import pandas as pd\n'), ((13772, 13850), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['df_res.columns'], 
{'names': "['CluserNames', 'Attributes']"}), "(df_res.columns, names=['CluserNames', 'Attributes'])\n", (13797, 13850), True, 'import pandas as pd\n'), ((16003, 16031), 'numpy.dstack', 'np.dstack', (['(topGenes, normW)'], {}), '((topGenes, normW))\n', (16012, 16031), True, 'import numpy as np\n'), ((16297, 16350), 'pandas.DataFrame', 'pd.DataFrame', (['res[0:nbr_features, :]'], {'columns': 'lst_col'}), '(res[0:nbr_features, :], columns=lst_col)\n', (16309, 16350), True, 'import pandas as pd\n'), ((16372, 16450), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['df_res.columns'], {'names': "['CluserNames', 'Attributes']"}), "(df_res.columns, names=['CluserNames', 'Attributes'])\n", (16397, 16450), True, 'import pandas as pd\n'), ((24270, 24286), 'numpy.zeros', 'np.zeros', (['(k, k)'], {}), '((k, k))\n', (24278, 24286), True, 'import numpy as np\n'), ((26035, 26048), 'numpy.abs', 'np.abs', (['normW'], {}), '(normW)\n', (26041, 26048), True, 'import numpy as np\n'), ((28096, 28108), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (28105, 28108), True, 'import numpy as np\n'), ((28320, 28354), 'numpy.random.choice', 'np.random.choice', (['lst_inds', 'n_diff'], {}), '(lst_inds, n_diff)\n', (28336, 28354), True, 'import numpy as np\n'), ((28367, 28391), 'numpy.delete', 'np.delete', (['X', 'lst_del', '(0)'], {}), '(X, lst_del, 0)\n', (28376, 28391), True, 'import numpy as np\n'), ((28404, 28428), 'numpy.delete', 'np.delete', (['Y', 'lst_del', '(0)'], {}), '(Y, lst_del, 0)\n', (28413, 28428), True, 'import numpy as np\n'), ((28442, 28467), 'numpy.delete', 'np.delete', (['ID', 'lst_del', '(0)'], {}), '(ID, lst_del, 0)\n', (28451, 28467), True, 'import numpy as np\n'), ((29306, 29318), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (29315, 29318), True, 'import numpy as np\n'), ((29530, 29564), 'numpy.random.choice', 'np.random.choice', (['lst_inds', 'n_diff'], {}), '(lst_inds, n_diff)\n', (29546, 29564), True, 'import numpy as np\n'), ((29577, 29601), 
'numpy.delete', 'np.delete', (['X', 'lst_del', '(0)'], {}), '(X, lst_del, 0)\n', (29586, 29601), True, 'import numpy as np\n'), ((29614, 29638), 'numpy.delete', 'np.delete', (['Y', 'lst_del', '(0)'], {}), '(Y, lst_del, 0)\n', (29623, 29638), True, 'import numpy as np\n'), ((31238, 31253), 'numpy.zeros', 'np.zeros', (['niter'], {}), '(niter)\n', (31246, 31253), True, 'import numpy as np\n'), ((31265, 31282), 'numpy.matmul', 'np.matmul', (['X.T', 'X'], {}), '(X.T, X)\n', (31274, 31282), True, 'import numpy as np\n'), ((31293, 31310), 'numpy.matmul', 'np.matmul', (['X.T', 'Y'], {}), '(X.T, Y)\n', (31302, 31310), True, 'import numpy as np\n'), ((31324, 31339), 'numpy.ones', 'np.ones', (['(d, k)'], {}), '((d, k))\n', (31331, 31339), True, 'import numpy as np\n'), ((34253, 34268), 'numpy.ones', 'np.ones', (['(d, k)'], {}), '((d, k))\n', (34260, 34268), True, 'import numpy as np\n'), ((34281, 34296), 'numpy.ones', 'np.ones', (['(m, k)'], {}), '((m, k))\n', (34288, 34296), True, 'import numpy as np\n'), ((34310, 34322), 'numpy.eye', 'np.eye', (['k', 'k'], {}), '(k, k)\n', (34316, 34322), True, 'import numpy as np\n'), ((34332, 34344), 'numpy.eye', 'np.eye', (['k', 'k'], {}), '(k, k)\n', (34338, 34344), True, 'import numpy as np\n'), ((34356, 34371), 'numpy.zeros', 'np.zeros', (['niter'], {}), '(niter)\n', (34364, 34371), True, 'import numpy as np\n'), ((37546, 37561), 'numpy.ones', 'np.ones', (['(d, k)'], {}), '((d, k))\n', (37553, 37561), True, 'import numpy as np\n'), ((37574, 37589), 'numpy.ones', 'np.ones', (['(m, k)'], {}), '((m, k))\n', (37581, 37589), True, 'import numpy as np\n'), ((37603, 37615), 'numpy.eye', 'np.eye', (['k', 'k'], {}), '(k, k)\n', (37609, 37615), True, 'import numpy as np\n'), ((37625, 37637), 'numpy.eye', 'np.eye', (['k', 'k'], {}), '(k, k)\n', (37631, 37637), True, 'import numpy as np\n'), ((37649, 37664), 'numpy.zeros', 'np.zeros', (['niter'], {}), '(niter)\n', (37657, 37664), True, 'import numpy as np\n'), ((41448, 41467), 
'numpy.random.seed', 'np.random.seed', (['rng'], {}), '(rng)\n', (41462, 41467), True, 'import numpy as np\n'), ((42861, 42887), 'numpy.zeros', 'np.zeros', (['nfold'], {'dtype': 'int'}), '(nfold, dtype=int)\n', (42869, 42887), True, 'import numpy as np\n'), ((42942, 42966), 'numpy.zeros', 'np.zeros', (['(nfold, k + 1)'], {}), '((nfold, k + 1))\n', (42950, 42966), True, 'import numpy as np\n'), ((42987, 43011), 'numpy.zeros', 'np.zeros', (['(nfold, k + 1)'], {}), '((nfold, k + 1))\n', (42995, 43011), True, 'import numpy as np\n'), ((43021, 43044), 'numpy.zeros', 'np.zeros', (['(d, k, nfold)'], {}), '((d, k, nfold))\n', (43029, 43044), True, 'import numpy as np\n'), ((43073, 43096), 'numpy.zeros', 'np.zeros', (['(k, k, nfold)'], {}), '((k, k, nfold))\n', (43081, 43096), True, 'import numpy as np\n'), ((43110, 43126), 'numpy.zeros', 'np.zeros', (['(d, k)'], {}), '((d, k))\n', (43118, 43126), True, 'import numpy as np\n'), ((43250, 43274), 'numpy.zeros', 'np.zeros', (['(nfold, niter)'], {}), '((nfold, niter))\n', (43258, 43274), True, 'import numpy as np\n'), ((43408, 43423), 'numpy.zeros', 'np.zeros', (['nfold'], {}), '(nfold)\n', (43416, 43423), True, 'import numpy as np\n'), ((45255, 45273), 'numpy.zeros', 'np.zeros', (['YR.shape'], {}), '(YR.shape)\n', (45263, 45273), True, 'import numpy as np\n'), ((45291, 45306), 'numpy.zeros', 'np.zeros', (['nfold'], {}), '(nfold)\n', (45299, 45306), True, 'import numpy as np\n'), ((45316, 45369), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nfold', 'random_state': 'rng', 'shuffle': '(True)'}), '(n_splits=nfold, random_state=rng, shuffle=True)\n', (45321, 45369), False, 'from sklearn.model_selection import KFold\n'), ((46833, 46869), 'numpy.mean', 'np.mean', (['accuracy_test[:, 0]'], {'axis': '(0)'}), '(accuracy_test[:, 0], axis=0)\n', (46840, 46869), True, 'import numpy as np\n'), ((46917, 46936), 'numpy.mean', 'np.mean', (['W0'], {'axis': '(2)'}), '(W0, axis=2)\n', (46924, 46936), True, 'import numpy as np\n'), 
((46951, 46971), 'numpy.mean', 'np.mean', (['mu0'], {'axis': '(2)'}), '(mu0, axis=2)\n', (46958, 46971), True, 'import numpy as np\n'), ((47019, 47043), 'numpy.linalg.norm', 'np.linalg.norm', (['w', '"""fro"""'], {}), "(w, 'fro')\n", (47033, 47043), True, 'import numpy as np\n'), ((47129, 47140), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (47137, 47140), True, 'import numpy as np\n'), ((47191, 47202), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (47199, 47202), True, 'import numpy as np\n'), ((47423, 47483), 'pandas.DataFrame', 'pd.DataFrame', (['size_class'], {'index': 'Ctab', 'columns': "['Class Size']"}), "(size_class, index=Ctab, columns=['Class Size'])\n", (47435, 47483), True, 'import pandas as pd\n'), ((47505, 47569), 'pandas.DataFrame', 'pd.DataFrame', (['size_class_est'], {'index': 'Ctab', 'columns': "['Class Size']"}), "(size_class_est, index=Ctab, columns=['Class Size'])\n", (47517, 47569), True, 'import pandas as pd\n'), ((48016, 48075), 'pandas.DataFrame', 'pd.DataFrame', (['accuracy_train'], {'index': 'ind_df', 'columns': 'columns'}), '(accuracy_train, index=ind_df, columns=columns)\n', (48028, 48075), True, 'import pandas as pd\n'), ((48093, 48151), 'pandas.DataFrame', 'pd.DataFrame', (['accuracy_test'], {'index': 'ind_df', 'columns': 'columns'}), '(accuracy_test, index=ind_df, columns=columns)\n', (48105, 48151), True, 'import pandas as pd\n'), ((48466, 48515), 'pandas.DataFrame', 'pd.DataFrame', (['topGenes_mean'], {'columns': 'clusternames'}), '(topGenes_mean, columns=clusternames)\n', (48478, 48515), True, 'import pandas as pd\n'), ((48536, 48582), 'pandas.DataFrame', 'pd.DataFrame', (['normW_mean'], {'columns': 'clusternames'}), '(normW_mean, columns=clusternames)\n', (48548, 48582), True, 'import pandas as pd\n'), ((48701, 48745), 'pandas.DataFrame', 'pd.DataFrame', (['topGenes'], {'columns': 'clusternames'}), '(topGenes, columns=clusternames)\n', (48713, 48745), True, 'import pandas as pd\n'), ((48761, 48802), 'pandas.DataFrame', 
'pd.DataFrame', (['normW'], {'columns': 'clusternames'}), '(normW, columns=clusternames)\n', (48773, 48802), True, 'import pandas as pd\n'), ((53247, 53258), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (53255, 53258), True, 'import numpy as np\n'), ((53334, 53377), 'pandas.DataFrame', 'pd.DataFrame', (['feature_names'], {'columns': "['pd']"}), "(feature_names, columns=['pd'])\n", (53346, 53377), True, 'import pandas as pd\n'), ((53724, 53766), 'pandas.DataFrame', 'pd.DataFrame', (['feature_names'], {'columns': '[alg]'}), '(feature_names, columns=[alg])\n', (53736, 53766), True, 'import pandas as pd\n'), ((56980, 57007), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (56994, 57007), True, 'import numpy as np\n'), ((57692, 57736), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dico'], {'orient': '"""index"""'}), "(dico, orient='index')\n", (57714, 57736), True, 'import pandas as pd\n'), ((58175, 58201), 'numpy.zeros', 'np.zeros', (['nfold'], {'dtype': 'int'}), '(nfold, dtype=int)\n', (58183, 58201), True, 'import numpy as np\n'), ((58245, 58268), 'numpy.zeros', 'np.zeros', (['(d, k, nfold)'], {}), '((d, k, nfold))\n', (58253, 58268), True, 'import numpy as np\n'), ((58297, 58320), 'numpy.zeros', 'np.zeros', (['(k, k, nfold)'], {}), '((k, k, nfold))\n', (58305, 58320), True, 'import numpy as np\n'), ((58444, 58468), 'numpy.zeros', 'np.zeros', (['(nfold, niter)'], {}), '((nfold, niter))\n', (58452, 58468), True, 'import numpy as np\n'), ((58602, 58617), 'numpy.zeros', 'np.zeros', (['nfold'], {}), '(nfold)\n', (58610, 58617), True, 'import numpy as np\n'), ((60069, 60130), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nfold', 'random_state': 'random_seed', 'shuffle': '(True)'}), '(n_splits=nfold, random_state=random_seed, shuffle=True)\n', (60074, 60130), False, 'from sklearn.model_selection import KFold\n'), ((64146, 64165), 'numpy.random.seed', 'np.random.seed', (['rng'], {}), '(rng)\n', (64160, 64165), 
True, 'import numpy as np\n'), ((65559, 65585), 'numpy.zeros', 'np.zeros', (['nfold'], {'dtype': 'int'}), '(nfold, dtype=int)\n', (65567, 65585), True, 'import numpy as np\n'), ((65640, 65664), 'numpy.zeros', 'np.zeros', (['(nfold, k + 1)'], {}), '((nfold, k + 1))\n', (65648, 65664), True, 'import numpy as np\n'), ((65685, 65709), 'numpy.zeros', 'np.zeros', (['(nfold, k + 1)'], {}), '((nfold, k + 1))\n', (65693, 65709), True, 'import numpy as np\n'), ((65726, 65741), 'numpy.zeros', 'np.zeros', (['nfold'], {}), '(nfold)\n', (65734, 65741), True, 'import numpy as np\n'), ((65759, 65774), 'numpy.zeros', 'np.zeros', (['nfold'], {}), '(nfold)\n', (65767, 65774), True, 'import numpy as np\n'), ((65793, 65808), 'numpy.zeros', 'np.zeros', (['nfold'], {}), '(nfold)\n', (65801, 65808), True, 'import numpy as np\n'), ((65820, 65843), 'numpy.zeros', 'np.zeros', (['(d, k, nfold)'], {}), '((d, k, nfold))\n', (65828, 65843), True, 'import numpy as np\n'), ((65872, 65895), 'numpy.zeros', 'np.zeros', (['(k, k, nfold)'], {}), '((k, k, nfold))\n', (65880, 65895), True, 'import numpy as np\n'), ((65909, 65925), 'numpy.zeros', 'np.zeros', (['(d, k)'], {}), '((d, k))\n', (65917, 65925), True, 'import numpy as np\n'), ((66049, 66073), 'numpy.zeros', 'np.zeros', (['(nfold, niter)'], {}), '((nfold, niter))\n', (66057, 66073), True, 'import numpy as np\n'), ((66207, 66222), 'numpy.zeros', 'np.zeros', (['nfold'], {}), '(nfold)\n', (66215, 66222), True, 'import numpy as np\n'), ((66350, 66368), 'numpy.zeros', 'np.zeros', (['YR.shape'], {}), '(YR.shape)\n', (66358, 66368), True, 'import numpy as np\n'), ((66386, 66401), 'numpy.zeros', 'np.zeros', (['nfold'], {}), '(nfold)\n', (66394, 66401), True, 'import numpy as np\n'), ((66411, 66464), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nfold', 'random_state': 'rng', 'shuffle': '(True)'}), '(n_splits=nfold, random_state=rng, shuffle=True)\n', (66416, 66464), False, 'from sklearn.model_selection import KFold\n'), ((66518, 66543), 
'numpy.zeros', 'np.zeros', (['(nfold, numalg)'], {}), '((nfold, numalg))\n', (66526, 66543), True, 'import numpy as np\n'), ((66569, 66594), 'numpy.zeros', 'np.zeros', (['(nfold, numalg)'], {}), '((nfold, numalg))\n', (66577, 66594), True, 'import numpy as np\n'), ((66616, 66645), 'numpy.zeros', 'np.zeros', (['(nfold, numalg * 4)'], {}), '((nfold, numalg * 4))\n', (66624, 66645), True, 'import numpy as np\n'), ((66666, 66695), 'numpy.zeros', 'np.zeros', (['(nfold, numalg * 4)'], {}), '((nfold, numalg * 4))\n', (66674, 66695), True, 'import numpy as np\n'), ((66720, 66749), 'numpy.zeros', 'np.zeros', (['(nfold, numalg + 1)'], {}), '((nfold, numalg + 1))\n', (66728, 66749), True, 'import numpy as np\n'), ((71498, 71534), 'numpy.mean', 'np.mean', (['accuracy_test[:, 0]'], {'axis': '(0)'}), '(accuracy_test[:, 0], axis=0)\n', (71505, 71534), True, 'import numpy as np\n'), ((71582, 71601), 'numpy.mean', 'np.mean', (['W0'], {'axis': '(2)'}), '(W0, axis=2)\n', (71589, 71601), True, 'import numpy as np\n'), ((71616, 71636), 'numpy.mean', 'np.mean', (['mu0'], {'axis': '(2)'}), '(mu0, axis=2)\n', (71623, 71636), True, 'import numpy as np\n'), ((71684, 71708), 'numpy.linalg.norm', 'np.linalg.norm', (['w', '"""fro"""'], {}), "(w, 'fro')\n", (71698, 71708), True, 'import numpy as np\n'), ((71758, 71769), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (71766, 71769), True, 'import numpy as np\n'), ((71820, 71831), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (71828, 71831), True, 'import numpy as np\n'), ((72052, 72112), 'pandas.DataFrame', 'pd.DataFrame', (['size_class'], {'index': 'Ctab', 'columns': "['Class Size']"}), "(size_class, index=Ctab, columns=['Class Size'])\n", (72064, 72112), True, 'import pandas as pd\n'), ((72134, 72198), 'pandas.DataFrame', 'pd.DataFrame', (['size_class_est'], {'index': 'Ctab', 'columns': "['Class Size']"}), "(size_class_est, index=Ctab, columns=['Class Size'])\n", (72146, 72198), True, 'import pandas as pd\n'), ((72776, 72835), 
'pandas.DataFrame', 'pd.DataFrame', (['accuracy_train'], {'index': 'ind_df', 'columns': 'columns'}), '(accuracy_train, index=ind_df, columns=columns)\n', (72788, 72835), True, 'import pandas as pd\n'), ((72853, 72911), 'pandas.DataFrame', 'pd.DataFrame', (['accuracy_test'], {'index': 'ind_df', 'columns': 'columns'}), '(accuracy_test, index=ind_df, columns=columns)\n', (72865, 72911), True, 'import pandas as pd\n'), ((73057, 73125), 'pandas.DataFrame', 'pd.DataFrame', (['accuracy_test_comp'], {'index': 'ind_df_comp', 'columns': 'alglist'}), '(accuracy_test_comp, index=ind_df_comp, columns=alglist)\n', (73069, 73125), True, 'import pandas as pd\n'), ((73423, 73485), 'pandas.DataFrame', 'pd.DataFrame', (['AUC_test_comp'], {'index': 'ind_df_comp', 'columns': 'colauc'}), '(AUC_test_comp, index=ind_df_comp, columns=colauc)\n', (73435, 73485), True, 'import pandas as pd\n'), ((73761, 73829), 'pandas.DataFrame', 'pd.DataFrame', (['timeElapsedMatrix'], {'index': 'ind_df_comp', 'columns': 'alglist1'}), '(timeElapsedMatrix, index=ind_df_comp, columns=alglist1)\n', (73773, 73829), True, 'import pandas as pd\n'), ((74212, 74261), 'pandas.DataFrame', 'pd.DataFrame', (['topGenes_mean'], {'columns': 'clusternames'}), '(topGenes_mean, columns=clusternames)\n', (74224, 74261), True, 'import pandas as pd\n'), ((74282, 74328), 'pandas.DataFrame', 'pd.DataFrame', (['normW_mean'], {'columns': 'clusternames'}), '(normW_mean, columns=clusternames)\n', (74294, 74328), True, 'import pandas as pd\n'), ((74447, 74491), 'pandas.DataFrame', 'pd.DataFrame', (['topGenes'], {'columns': 'clusternames'}), '(topGenes, columns=clusternames)\n', (74459, 74491), True, 'import pandas as pd\n'), ((74507, 74548), 'pandas.DataFrame', 'pd.DataFrame', (['normW'], {'columns': 'clusternames'}), '(normW, columns=clusternames)\n', (74519, 74548), True, 'import pandas as pd\n'), ((79495, 79514), 'numpy.random.seed', 'np.random.seed', (['rng'], {}), '(rng)\n', (79509, 79514), True, 'import numpy as np\n'), ((80096, 
80111), 'numpy.zeros', 'np.zeros', (['nfold'], {}), '(nfold)\n', (80104, 80111), True, 'import numpy as np\n'), ((80124, 80177), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'nfold', 'random_state': 'rng', 'shuffle': '(True)'}), '(n_splits=nfold, random_state=rng, shuffle=True)\n', (80129, 80177), False, 'from sklearn.model_selection import KFold\n'), ((80231, 80256), 'numpy.zeros', 'np.zeros', (['(nfold, numalg)'], {}), '((nfold, numalg))\n', (80239, 80256), True, 'import numpy as np\n'), ((80282, 80307), 'numpy.zeros', 'np.zeros', (['(nfold, numalg)'], {}), '((nfold, numalg))\n', (80290, 80307), True, 'import numpy as np\n'), ((80329, 80358), 'numpy.zeros', 'np.zeros', (['(nfold, numalg * 4)'], {}), '((nfold, numalg * 4))\n', (80337, 80358), True, 'import numpy as np\n'), ((80379, 80408), 'numpy.zeros', 'np.zeros', (['(nfold, numalg * 4)'], {}), '((nfold, numalg * 4))\n', (80387, 80408), True, 'import numpy as np\n'), ((80433, 80458), 'numpy.zeros', 'np.zeros', (['(nfold, numalg)'], {}), '((nfold, numalg))\n', (80441, 80458), True, 'import numpy as np\n'), ((85421, 85489), 'pandas.DataFrame', 'pd.DataFrame', (['accuracy_test_comp'], {'index': 'ind_df_comp', 'columns': 'alglist'}), '(accuracy_test_comp, index=ind_df_comp, columns=alglist)\n', (85433, 85489), True, 'import pandas as pd\n'), ((85747, 85809), 'pandas.DataFrame', 'pd.DataFrame', (['AUC_test_comp'], {'index': 'ind_df_comp', 'columns': 'colauc'}), '(AUC_test_comp, index=ind_df_comp, columns=colauc)\n', (85759, 85809), True, 'import pandas as pd\n'), ((86027, 86095), 'pandas.DataFrame', 'pd.DataFrame', (['timeElapsedMatrix'], {'index': 'ind_df_comp', 'columns': 'alglist1'}), '(timeElapsedMatrix, index=ind_df_comp, columns=alglist1)\n', (86039, 86095), True, 'import pandas as pd\n'), ((87227, 87251), 'numpy.zeros', 'np.zeros', (['(d, k, n_etas)'], {}), '((d, k, n_etas))\n', (87235, 87251), True, 'import numpy as np\n'), ((87268, 87293), 'numpy.zeros', 'np.zeros', (['(n_etas, niter)'], {}), 
'((n_etas, niter))\n', (87276, 87293), True, 'import numpy as np\n'), ((87348, 87375), 'numpy.zeros', 'np.zeros', (['n_etas'], {'dtype': 'int'}), '(n_etas, dtype=int)\n', (87356, 87375), True, 'import numpy as np\n'), ((87392, 87408), 'numpy.zeros', 'np.zeros', (['n_etas'], {}), '(n_etas)\n', (87400, 87408), True, 'import numpy as np\n'), ((87432, 87448), 'numpy.zeros', 'np.zeros', (['n_etas'], {}), '(n_etas)\n', (87440, 87448), True, 'import numpy as np\n'), ((89788, 89824), 'pandas.DataFrame', 'pd.DataFrame', (['nbm_etas'], {'index': 'tabeta'}), '(nbm_etas, index=tabeta)\n', (89800, 89824), True, 'import pandas as pd\n'), ((89841, 89878), 'pandas.DataFrame', 'pd.DataFrame', (['accG_etas'], {'index': 'tabeta'}), '(accG_etas, index=tabeta)\n', (89853, 89878), True, 'import pandas as pd\n'), ((90063, 90107), 'pandas.DataFrame', 'pd.DataFrame', (['timeElapsed_etas'], {'index': 'tabeta'}), '(timeElapsed_etas, index=tabeta)\n', (90075, 90107), True, 'import pandas as pd\n'), ((96131, 96155), 'numpy.zeros', 'np.zeros', (['(d, k, n_etas)'], {}), '((d, k, n_etas))\n', (96139, 96155), True, 'import numpy as np\n'), ((96172, 96197), 'numpy.zeros', 'np.zeros', (['(n_etas, niter)'], {}), '((n_etas, niter))\n', (96180, 96197), True, 'import numpy as np\n'), ((96252, 96279), 'numpy.zeros', 'np.zeros', (['n_etas'], {'dtype': 'int'}), '(n_etas, dtype=int)\n', (96260, 96279), True, 'import numpy as np\n'), ((96296, 96312), 'numpy.zeros', 'np.zeros', (['n_etas'], {}), '(n_etas)\n', (96304, 96312), True, 'import numpy as np\n'), ((96336, 96352), 'numpy.zeros', 'np.zeros', (['n_etas'], {}), '(n_etas)\n', (96344, 96352), True, 'import numpy as np\n'), ((98174, 98215), 'pandas.DataFrame', 'pd.DataFrame', (['accG_etas'], {'index': 'tabEtastar'}), '(accG_etas, index=tabEtastar)\n', (98186, 98215), True, 'import pandas as pd\n'), ((98404, 98452), 'pandas.DataFrame', 'pd.DataFrame', (['timeElapsed_etas'], {'index': 'tabEtastar'}), '(timeElapsed_etas, index=tabEtastar)\n', (98416, 98452), 
True, 'import pandas as pd\n'), ((1034, 1045), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1042, 1045), True, 'import numpy as np\n'), ((1077, 1097), 'numpy.reshape', 'np.reshape', (['y', '(-1,)'], {}), '(y, (-1,))\n', (1087, 1097), True, 'import numpy as np\n'), ((1419, 1429), 'numpy.sign', 'np.sign', (['y'], {}), '(y)\n', (1426, 1429), True, 'import numpy as np\n'), ((1763, 1781), 'numpy.mean', 'np.mean', (['C'], {'axis': '(0)'}), '(C, axis=0)\n', (1770, 1781), True, 'import numpy as np\n'), ((2136, 2153), 'numpy.reshape', 'np.reshape', (['y', '(-1)'], {}), '(y, -1)\n', (2146, 2153), True, 'import numpy as np\n'), ((2673, 2697), 'numpy.where', 'np.where', (['(ind_genes == 1)'], {}), '(ind_genes == 1)\n', (2681, 2697), True, 'import numpy as np\n'), ((2715, 2732), 'numpy.sum', 'np.sum', (['ind_genes'], {}), '(ind_genes)\n', (2721, 2732), True, 'import numpy as np\n'), ((2961, 2974), 'numpy.abs', 'np.abs', (['s_tmp'], {}), '(s_tmp)\n', (2967, 2974), True, 'import numpy as np\n'), ((4015, 4040), 'numpy.array', 'np.array', (['lst_features[i]'], {}), '(lst_features[i])\n', (4023, 4040), True, 'import numpy as np\n'), ((4098, 4119), 'numpy.array', 'np.array', (['lst_norm[i]'], {}), '(lst_norm[i])\n', (4106, 4119), True, 'import numpy as np\n'), ((4272, 4297), 'numpy.vstack', 'np.vstack', (['(features, ft)'], {}), '((features, ft))\n', (4281, 4297), True, 'import numpy as np\n'), ((4314, 4336), 'numpy.vstack', 'np.vstack', (['(normW, nt)'], {}), '((normW, nt))\n', (4323, 4336), True, 'import numpy as np\n'), ((5009, 5023), 'numpy.array', 'np.array', (['idxR'], {}), '(idxR)\n', (5017, 5023), True, 'import numpy as np\n'), ((5072, 5085), 'numpy.array', 'np.array', (['idx'], {}), '(idx)\n', (5080, 5085), True, 'import numpy as np\n'), ((5148, 5173), 'numpy.reshape', 'np.reshape', (['idxR', '(-1, 1)'], {}), '(idxR, (-1, 1))\n', (5158, 5173), True, 'import numpy as np\n'), ((5210, 5237), 'numpy.reshape', 'np.reshape', (['idx', 'idxR.shape'], {}), '(idx, idxR.shape)\n', 
(5220, 5237), True, 'import numpy as np\n'), ((5924, 5940), 'numpy.zeros', 'np.zeros', (['(1, k)'], {}), '((1, k))\n', (5932, 5940), True, 'import numpy as np\n'), ((5955, 5980), 'numpy.matmul', 'np.matmul', (['Xtest[i, :]', 'W'], {}), '(Xtest[i, :], W)\n', (5964, 5980), True, 'import numpy as np\n'), ((6502, 6518), 'numpy.zeros', 'np.zeros', (['(1, k)'], {}), '((1, k))\n', (6510, 6518), True, 'import numpy as np\n'), ((6533, 6558), 'numpy.matmul', 'np.matmul', (['Xtest[i, :]', 'W'], {}), '(Xtest[i, :], W)\n', (6542, 6558), True, 'import numpy as np\n'), ((7964, 8032), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c1[0]', 'c1[1]'], {'c': '"""y"""', 's': '(100)', 'marker': '"""*"""', 'label': '"""center1"""'}), "(c1[0], c1[1], c='y', s=100, marker='*', label='center1')\n", (7975, 8032), True, 'from matplotlib import pyplot as plt\n'), ((8041, 8109), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c2[0]', 'c2[1]'], {'c': '"""c"""', 's': '(100)', 'marker': '"""*"""', 'label': '"""center2"""'}), "(c2[0], c2[1], c='c', s=100, marker='*', label='center2')\n", (8052, 8109), True, 'from matplotlib import pyplot as plt\n'), ((8847, 8915), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c1[0]', 'c1[1]'], {'c': '"""y"""', 's': '(100)', 'marker': '"""*"""', 'label': '"""center1"""'}), "(c1[0], c1[1], c='y', s=100, marker='*', label='center1')\n", (8858, 8915), True, 'from matplotlib import pyplot as plt\n'), ((8924, 8992), 'matplotlib.pyplot.scatter', 'plt.scatter', (['c2[0]', 'c2[1]'], {'c': '"""c"""', 's': '(100)', 'marker': '"""*"""', 'label': '"""center2"""'}), "(c2[0], c2[1], c='c', s=100, marker='*', label='center2')\n", (8935, 8992), True, 'from matplotlib import pyplot as plt\n'), ((9838, 9855), 'numpy.where', 'np.where', (['(Yr == 2)'], {}), '(Yr == 2)\n', (9846, 9855), True, 'import numpy as np\n'), ((9915, 9934), 'numpy.where', 'np.where', (['(Ypre == 2)'], {}), '(Ypre == 2)\n', (9923, 9934), True, 'import numpy as np\n'), ((10104, 10148), 'numpy.where', 'np.where', 
(['((-epsilon < rho) & (rho < epsilon))'], {}), '((-epsilon < rho) & (rho < epsilon))\n', (10112, 10148), True, 'import numpy as np\n'), ((10993, 11009), 'numpy.zeros', 'np.zeros', (['(1, k)'], {}), '((1, k))\n', (11001, 11009), True, 'import numpy as np\n'), ((11024, 11049), 'numpy.matmul', 'np.matmul', (['Xtest[i, :]', 'W'], {}), '(Xtest[i, :], W)\n', (11033, 11049), True, 'import numpy as np\n'), ((11805, 11829), 'numpy.abs', 'np.abs', (['(norm_e - norm_e0)'], {}), '(norm_e - norm_e0)\n', (11811, 11829), True, 'import numpy as np\n'), ((11884, 11899), 'numpy.matmul', 'np.matmul', (['X', 'x'], {}), '(X, x)\n', (11893, 11899), True, 'import numpy as np\n'), ((11995, 12013), 'numpy.matmul', 'np.matmul', (['X.T', 'Xx'], {}), '(X.T, Xx)\n', (12004, 12013), True, 'import numpy as np\n'), ((12030, 12047), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (12044, 12047), True, 'import numpy as np\n'), ((18130, 18166), 'pandas.DataFrame', 'pd.DataFrame', (['topGenes1'], {'columns': 'col'}), '(topGenes1, columns=col)\n', (18142, 18166), True, 'import pandas as pd\n'), ((18187, 18223), 'pandas.DataFrame', 'pd.DataFrame', (['topGenes2'], {'columns': 'col'}), '(topGenes2, columns=col)\n', (18199, 18223), True, 'import pandas as pd\n'), ((18681, 18719), 'numpy.array', 'np.array', (['topGenes1[[name]]'], {'dtype': 'str'}), '(topGenes1[[name]], dtype=str)\n', (18689, 18719), True, 'import numpy as np\n'), ((18736, 18774), 'numpy.array', 'np.array', (['topGenes2[[name]]'], {'dtype': 'str'}), '(topGenes2[[name]], dtype=str)\n', (18744, 18774), True, 'import numpy as np\n'), ((21723, 21751), 'numpy.intersect1d', 'np.intersect1d', (['set_1', 'set_2'], {}), '(set_1, set_2)\n', (21737, 21751), True, 'import numpy as np\n'), ((21787, 21813), 'numpy.setdiff1d', 'np.setdiff1d', (['set_1', 'set_2'], {}), '(set_1, set_2)\n', (21799, 21813), True, 'import numpy as np\n'), ((21859, 21885), 'numpy.setdiff1d', 'np.setdiff1d', (['set_2', 'set_1'], {}), '(set_2, set_1)\n', (21871, 
21885), True, 'import numpy as np\n'), ((24194, 24207), 'numpy.unique', 'np.unique', (['YR'], {}), '(YR)\n', (24203, 24207), True, 'import numpy as np\n'), ((24300, 24312), 'numpy.arange', 'np.arange', (['k'], {}), '(k)\n', (24309, 24312), True, 'import numpy as np\n'), ((24617, 24644), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (24627, 24644), True, 'from matplotlib import pyplot as plt\n'), ((25254, 25281), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': 'rotate'}), '(rotation=rotate)\n', (25264, 25281), True, 'from matplotlib import pyplot as plt\n'), ((25357, 25375), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (25373, 25375), True, 'from matplotlib import pyplot as plt\n'), ((26208, 26235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (26218, 26235), True, 'from matplotlib import pyplot as plt\n'), ((27125, 27143), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (27141, 27143), True, 'from matplotlib import pyplot as plt\n'), ((28179, 28191), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (28188, 28191), True, 'import numpy as np\n'), ((28192, 28212), 'numpy.argmax', 'np.argmax', (['lst_count'], {}), '(lst_count)\n', (28201, 28212), True, 'import numpy as np\n'), ((28229, 28251), 'numpy.where', 'np.where', (['(Y == ind_max)'], {}), '(Y == ind_max)\n', (28237, 28251), True, 'import numpy as np\n'), ((29389, 29401), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (29398, 29401), True, 'import numpy as np\n'), ((29402, 29422), 'numpy.argmax', 'np.argmax', (['lst_count'], {}), '(lst_count)\n', (29411, 29422), True, 'import numpy as np\n'), ((29439, 29461), 'numpy.where', 'np.where', (['(Y == ind_max)'], {}), '(Y == ind_max)\n', (29447, 29461), True, 'import numpy as np\n'), ((31519, 31539), 'numpy.reshape', 'np.reshape', (['V', '(d * k)'], {}), '(V, d * k)\n', (31529, 31539), True, 'import numpy 
as np\n'), ((31649, 31670), 'numpy.reshape', 'np.reshape', (['V', '(d, k)'], {}), '(V, (d, k))\n', (31659, 31670), True, 'import numpy as np\n'), ((32030, 32045), 'numpy.matmul', 'np.matmul', (['X', 'w'], {}), '(X, w)\n', (32039, 32045), True, 'import numpy as np\n'), ((34495, 34515), 'numpy.reshape', 'np.reshape', (['V', '(d * k)'], {}), '(V, d * k)\n', (34505, 34515), True, 'import numpy as np\n'), ((34633, 34654), 'numpy.reshape', 'np.reshape', (['V', '(d, k)'], {}), '(V, (d, k))\n', (34643, 34654), True, 'import numpy as np\n'), ((37807, 37844), 'numpy.linalg.svd', 'np.linalg.svd', (['V'], {'full_matrices': '(False)'}), '(V, full_matrices=False)\n', (37820, 37844), True, 'import numpy as np\n'), ((37979, 38010), 'numpy.matmul', 'np.matmul', (['L', '(S1[..., None] * R)'], {}), '(L, S1[..., None] * R)\n', (37988, 38010), True, 'import numpy as np\n'), ((41495, 41521), 'os.path.exists', 'os.path.exists', (['outputPath'], {}), '(outputPath)\n', (41509, 41521), False, 'import os\n'), ((41574, 41597), 'os.makedirs', 'os.makedirs', (['outputPath'], {}), '(outputPath)\n', (41585, 41597), False, 'import os\n'), ((45874, 45893), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (45891, 45893), False, 'import time\n'), ((45984, 46003), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (46001, 46003), False, 'import time\n'), ((46636, 46670), 'numpy.mean', 'np.mean', (['accuracy_test[i, 1:k + 1]'], {}), '(accuracy_test[i, 1:k + 1])\n', (46643, 46670), True, 'import numpy as np\n'), ((49479, 49505), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (49489, 49505), True, 'from matplotlib import pyplot as plt\n'), ((49890, 49921), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {'fontsize': '(18)'}), "('Loss', fontsize=18)\n", (49900, 49921), True, 'from matplotlib import pyplot as plt\n'), ((49930, 49966), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {'fontsize': '(18)'}), 
"('Iteration', fontsize=18)\n", (49940, 49966), True, 'from matplotlib import pyplot as plt\n'), ((50050, 50079), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(1)', 'right': 'niter'}), '(left=1, right=niter)\n', (50058, 50079), True, 'from matplotlib import pyplot as plt\n'), ((50088, 50104), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (50096, 50104), True, 'from matplotlib import pyplot as plt\n'), ((50517, 50561), 'numpy.savetxt', 'np.savetxt', (['(outputPath + filename_loss)', 'loss'], {}), '(outputPath + filename_loss, loss)\n', (50527, 50561), True, 'import numpy as np\n'), ((50827, 50875), 'numpy.save', 'np.save', (['filename_heat', 'M_heatmap_classification'], {}), '(filename_heat, M_heatmap_classification)\n', (50834, 50875), True, 'import numpy as np\n'), ((50998, 51041), 'numpy.save', 'np.save', (['filename_heat', 'M_heatmap_signature'], {}), '(filename_heat, M_heatmap_signature)\n', (51005, 51041), True, 'import numpy as np\n'), ((60637, 60656), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (60654, 60656), False, 'import time\n'), ((60753, 60772), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (60770, 60772), False, 'import time\n'), ((64193, 64219), 'os.path.exists', 'os.path.exists', (['outputPath'], {}), '(outputPath)\n', (64207, 64219), False, 'import os\n'), ((64272, 64295), 'os.makedirs', 'os.makedirs', (['outputPath'], {}), '(outputPath)\n', (64283, 64295), False, 'import os\n'), ((67318, 67337), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (67335, 67337), False, 'import time\n'), ((67428, 67447), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (67445, 67447), False, 'import time\n'), ((68404, 68438), 'numpy.mean', 'np.mean', (['accuracy_test[i, 1:k + 1]'], {}), '(accuracy_test[i, 1:k + 1])\n', (68411, 68438), True, 'import numpy as np\n'), ((75209, 75235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (75219, 
75235), True, 'from matplotlib import pyplot as plt\n'), ((75620, 75651), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {'fontsize': '(18)'}), "('Loss', fontsize=18)\n", (75630, 75651), True, 'from matplotlib import pyplot as plt\n'), ((75660, 75696), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {'fontsize': '(18)'}), "('Iteration', fontsize=18)\n", (75670, 75696), True, 'from matplotlib import pyplot as plt\n'), ((75780, 75809), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(1)', 'right': 'niter'}), '(left=1, right=niter)\n', (75788, 75809), True, 'from matplotlib import pyplot as plt\n'), ((75818, 75834), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (75826, 75834), True, 'from matplotlib import pyplot as plt\n'), ((78898, 78911), 'numpy.unique', 'np.unique', (['Yr'], {}), '(Yr)\n', (78907, 78911), True, 'import numpy as np\n'), ((89326, 89352), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (89336, 89352), True, 'from matplotlib import pyplot as plt\n'), ((89361, 89410), 'matplotlib.pyplot.plot', 'plt.plot', (['nbm_etas', 'accG_etas', '"""bo-"""'], {'linewidth': '(3)'}), "(nbm_etas, accG_etas, 'bo-', linewidth=3)\n", (89369, 89410), True, 'from matplotlib import pyplot as plt\n'), ((89535, 89570), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {'fontsize': '(16)'}), "('Accuracy', fontsize=16)\n", (89545, 89570), True, 'from matplotlib import pyplot as plt\n'), ((89579, 89621), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of genes"""'], {'fontsize': '(16)'}), "('Number of genes', fontsize=16)\n", (89589, 89621), True, 'from matplotlib import pyplot as plt\n'), ((2598, 2621), 'numpy.linalg.norm', 'np.linalg.norm', (['w[i, :]'], {}), '(w[i, :])\n', (2612, 2621), True, 'import numpy as np\n'), ((3027, 3044), 'numpy.argsort', 'np.argsort', (['f_tmp'], {}), '(f_tmp)\n', (3037, 3044), True, 'import numpy as np\n'), ((3148, 3162), 
'numpy.sort', 'np.sort', (['f_tmp'], {}), '(f_tmp)\n', (3155, 3162), True, 'import numpy as np\n'), ((3236, 3253), 'numpy.nonzero', 'np.nonzero', (['f_tmp'], {}), '(f_tmp)\n', (3246, 3253), True, 'import numpy as np\n'), ((5553, 5576), 'numpy.where', 'np.where', (['(idxR == j + 1)'], {}), '(idxR == j + 1)\n', (5561, 5576), True, 'import numpy as np\n'), ((6035, 6068), 'numpy.linalg.norm', 'np.linalg.norm', (['(XWi - mu[j, :])', '(1)'], {}), '(XWi - mu[j, :], 1)\n', (6049, 6068), True, 'import numpy as np\n'), ((6172, 6189), 'numpy.argmin', 'np.argmin', (['distmu'], {}), '(distmu)\n', (6181, 6189), True, 'import numpy as np\n'), ((6613, 6646), 'numpy.linalg.norm', 'np.linalg.norm', (['(XWi - mu[j, :])', '(1)'], {}), '(XWi - mu[j, :], 1)\n', (6627, 6646), True, 'import numpy as np\n'), ((6666, 6683), 'numpy.argmin', 'np.argmin', (['distmu'], {}), '(distmu)\n', (6675, 6683), True, 'import numpy as np\n'), ((11104, 11137), 'numpy.linalg.norm', 'np.linalg.norm', (['(XWi - mu[j, :])', '(2)'], {}), '(XWi - mu[j, :], 2)\n', (11118, 11137), True, 'import numpy as np\n'), ((11157, 11174), 'numpy.argmin', 'np.argmin', (['distmu'], {}), '(distmu)\n', (11166, 11174), True, 'import numpy as np\n'), ((11911, 11931), 'numpy.count_nonzero', 'np.count_nonzero', (['Xx'], {}), '(Xx)\n', (11927, 11931), True, 'import numpy as np\n'), ((11955, 11982), 'numpy.random.rand', 'np.random.rand', (['Xx.shape[0]'], {}), '(Xx.shape[0])\n', (11969, 11982), True, 'import numpy as np\n'), ((12073, 12091), 'numpy.linalg.norm', 'np.linalg.norm', (['Xx'], {}), '(Xx)\n', (12087, 12091), True, 'import numpy as np\n'), ((24335, 24347), 'numpy.arange', 'np.arange', (['k'], {}), '(k)\n', (24344, 24347), True, 'import numpy as np\n'), ((24768, 24888), 'seaborn.heatmap', 'sns.heatmap', (['Heatmap_matrix'], {'cmap': '"""jet"""', 'annot': 'annot', 'fmt': '""".2f"""', 'xticklabels': 'clusternames', 'yticklabels': 'clusternames'}), "(Heatmap_matrix, cmap='jet', annot=annot, fmt='.2f', xticklabels\n 
=clusternames, yticklabels=clusternames)\n", (24779, 24888), True, 'import seaborn as sns\n'), ((25028, 25091), 'seaborn.heatmap', 'sns.heatmap', (['Heatmap_matrix'], {'cmap': '"""jet"""', 'annot': 'annot', 'fmt': '""".2f"""'}), "(Heatmap_matrix, cmap='jet', annot=annot, fmt='.2f')\n", (25039, 25091), True, 'import seaborn as sns\n'), ((26890, 26917), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': 'rotate'}), '(rotation=rotate)\n', (26900, 26917), True, 'from matplotlib import pyplot as plt\n'), ((31418, 31439), 'numpy.matmul', 'np.matmul', (['XtX', 'w_loc'], {}), '(XtX, w_loc)\n', (31427, 31439), True, 'import numpy as np\n'), ((35080, 35096), 'numpy.minimum', 'np.minimum', (['Z', '(1)'], {}), '(Z, 1)\n', (35090, 35096), True, 'import numpy as np\n'), ((38307, 38323), 'numpy.minimum', 'np.minimum', (['Z', '(1)'], {}), '(Z, 1)\n', (38317, 38323), True, 'import numpy as np\n'), ((47639, 47670), 'numpy.mean', 'np.mean', (['accuracy_train'], {'axis': '(0)'}), '(accuracy_train, axis=0)\n', (47646, 47670), True, 'import numpy as np\n'), ((47719, 47749), 'numpy.mean', 'np.mean', (['accuracy_test'], {'axis': '(0)'}), '(accuracy_test, axis=0)\n', (47726, 47749), True, 'import numpy as np\n'), ((49439, 49455), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (49448, 49455), True, 'from matplotlib import pyplot as plt\n'), ((49986, 50040), 'numpy.linspace', 'np.linspace', (['(1)', 'niter'], {'num': '(6)', 'endpoint': '(True)', 'dtype': 'int'}), '(1, niter, num=6, endpoint=True, dtype=int)\n', (49997, 50040), True, 'import numpy as np\n'), ((54098, 54136), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)', 'kernel': '"""linear"""'}), "(probability=True, kernel='linear')\n", (54101, 54136), False, 'from sklearn.svm import SVC\n'), ((54385, 54455), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(400)', 'random_state': '(10)', 'max_depth': '(3)'}), '(n_estimators=400, random_state=10, 
max_depth=3)\n', (54407, 54455), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n'), ((54710, 54752), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': '(4)', 'scale': '(False)'}), '(n_components=4, scale=False)\n', (54723, 54752), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((69294, 69313), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (69311, 69313), False, 'import time\n'), ((69544, 69563), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (69561, 69563), False, 'import time\n'), ((72268, 72299), 'numpy.mean', 'np.mean', (['accuracy_train'], {'axis': '(0)'}), '(accuracy_train, axis=0)\n', (72275, 72299), True, 'import numpy as np\n'), ((72348, 72378), 'numpy.mean', 'np.mean', (['accuracy_test'], {'axis': '(0)'}), '(accuracy_test, axis=0)\n', (72355, 72378), True, 'import numpy as np\n'), ((75169, 75185), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (75178, 75185), True, 'from matplotlib import pyplot as plt\n'), ((75716, 75770), 'numpy.linspace', 'np.linspace', (['(1)', 'niter'], {'num': '(6)', 'endpoint': '(True)', 'dtype': 'int'}), '(1, niter, num=6, endpoint=True, dtype=int)\n', (75727, 75770), True, 'import numpy as np\n'), ((79113, 79138), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (79136, 79138), False, 'import warnings\n'), ((79152, 79214), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (79173, 79214), False, 'import warnings\n'), ((79232, 79264), 'numpy.where', 'np.where', (['(Yr == label)', 'index', 'Yr'], {}), '(Yr == label, index, Yr)\n', (79240, 79264), True, 'import numpy as np\n'), ((82610, 82629), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (82627, 82629), False, 'import time\n'), ((82704, 82723), 'time.perf_counter', 'time.perf_counter', ([], {}), 
'()\n', (82721, 82723), False, 'import time\n'), ((1129, 1143), 'numpy.absolute', 'np.absolute', (['y'], {}), '(y)\n', (1140, 1143), True, 'import numpy as np\n'), ((11442, 11451), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (11448, 11451), True, 'import numpy as np\n'), ((11652, 11661), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (11658, 11661), True, 'import numpy as np\n'), ((34443, 34464), 'numpy.matmul', 'np.matmul', (['X.T', 'Z_old'], {}), '(X.T, Z_old)\n', (34452, 34464), True, 'import numpy as np\n'), ((37736, 37757), 'numpy.matmul', 'np.matmul', (['X.T', 'Z_old'], {}), '(X.T, Z_old)\n', (37745, 37757), True, 'import numpy as np\n'), ((42503, 42513), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (42510, 42513), True, 'import numpy as np\n'), ((49523, 49550), 'numpy.arange', 'np.arange', (['niter'], {'dtype': 'int'}), '(niter, dtype=int)\n', (49532, 49550), True, 'import numpy as np\n'), ((57850, 57860), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (57857, 57860), True, 'import numpy as np\n'), ((65201, 65211), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (65208, 65211), True, 'import numpy as np\n'), ((69035, 69105), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(400)', 'random_state': '(10)', 'max_depth': '(3)'}), '(n_estimators=400, random_state=10, max_depth=3)\n', (69057, 69105), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n'), ((69197, 69239), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': '(4)', 'scale': '(False)'}), '(n_components=4, scale=False)\n', (69210, 69239), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((70448, 70502), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['model.x_scores_', 'Ypred_train'], {}), '(model.x_scores_, Ypred_train)\n', (70472, 70502), False, 'from sklearn import metrics\n'), ((75253, 75280), 'numpy.arange', 'np.arange', (['niter'], {'dtype': 'int'}), 
'(niter, dtype=int)\n', (75262, 75280), True, 'import numpy as np\n'), ((81339, 81409), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(400)', 'random_state': '(10)', 'max_depth': '(3)'}), '(n_estimators=400, random_state=10, max_depth=3)\n', (81361, 81409), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n'), ((81566, 81608), 'sklearn.cross_decomposition.PLSRegression', 'PLSRegression', ([], {'n_components': '(4)', 'scale': '(False)'}), '(n_components=4, scale=False)\n', (81579, 81608), False, 'from sklearn.cross_decomposition import PLSRegression\n'), ((81713, 81737), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(10)'}), '(C=10)\n', (81731, 81737), False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((81850, 81886), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(50)'}), '(n_neighbors=50)\n', (81870, 81886), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((81946, 81977), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {'var_smoothing': '(1e-09)'}), '(var_smoothing=1e-09)\n', (81956, 81977), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((82063, 82099), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (82081, 82099), False, 'from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\n'), ((82246, 82283), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'random_state': '(0)', 'max_iter': '(10000)'}), '(random_state=0, max_iter=10000)\n', (82251, 82283), False, 'from sklearn.linear_model import LogisticRegression, Lasso\n'), ((82309, 82334), 'numpy.logspace', 'np.logspace', (['(-4)', '(-0.5)', '(20)'], {}), '(-4, -0.5, 20)\n', (82320, 82334), True, 'import numpy as np\n'), ((82440, 82489), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['lasso', 'tuned_parameters'], {'cv': 
'n_folds'}), '(lasso, tuned_parameters, cv=n_folds)\n', (82452, 82489), False, 'from sklearn.model_selection import GridSearchCV\n'), ((83537, 83591), 'sklearn.metrics.silhouette_score', 'metrics.silhouette_score', (['model.x_scores_', 'Ypred_train'], {}), '(model.x_scores_, Ypred_train)\n', (83561, 83591), False, 'from sklearn import metrics\n'), ((5686, 5711), 'numpy.sum', 'np.sum', (['(idx[ind] == j + 1)'], {}), '(idx[ind] == j + 1)\n', (5692, 5711), True, 'import numpy as np\n'), ((10286, 10306), 'numpy.where', 'np.where', (['(Ydiff != 0)'], {}), '(Ydiff != 0)\n', (10294, 10306), True, 'import numpy as np\n'), ((11559, 11572), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (11569, 11572), True, 'import numpy as np\n'), ((12345, 12369), 'numpy.abs', 'np.abs', (['(norm_e - norm_e0)'], {}), '(norm_e - norm_e0)\n', (12351, 12369), True, 'import numpy as np\n'), ((19138, 19155), 'numpy.nonzero', 'np.nonzero', (['col_1'], {}), '(col_1)\n', (19148, 19155), True, 'import numpy as np\n'), ((19194, 19211), 'numpy.nonzero', 'np.nonzero', (['col_2'], {}), '(col_2)\n', (19204, 19211), True, 'import numpy as np\n'), ((24369, 24404), 'numpy.where', 'np.where', (['(Ytest[YR == i] == j)', '(1)', '(0)'], {}), '(Ytest[YR == i] == j, 1, 0)\n', (24377, 24404), True, 'import numpy as np\n'), ((24489, 24512), 'numpy.where', 'np.where', (['(YR == i)', '(1)', '(0)'], {}), '(YR == i, 1, 0)\n', (24497, 24512), True, 'import numpy as np\n'), ((26508, 26566), 'numpy.linspace', 'np.linspace', (['(1)', 'nbr_l'], {'num': 'nbr_l', 'endpoint': '(True)', 'dtype': 'int'}), '(1, nbr_l, num=nbr_l, endpoint=True, dtype=int)\n', (26519, 26566), True, 'import numpy as np\n'), ((26804, 26862), 'numpy.linspace', 'np.linspace', (['(1)', 'nbr_l'], {'num': 'nbr_l', 'endpoint': '(True)', 'dtype': 'int'}), '(1, nbr_l, num=nbr_l, endpoint=True, dtype=int)\n', (26815, 26862), True, 'import numpy as np\n'), ((28135, 28157), 'numpy.where', 'np.where', (['(Y == i)', '(1)', '(0)'], {}), '(Y == i, 1, 0)\n', 
(28143, 28157), True, 'import numpy as np\n'), ((29345, 29367), 'numpy.where', 'np.where', (['(Y == i)', '(1)', '(0)'], {}), '(Y == i, 1, 0)\n', (29353, 29367), True, 'import numpy as np\n'), ((31165, 31197), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (31178, 31197), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((31944, 31963), 'numpy.matmul', 'np.matmul', (['X', 'w_loc'], {}), '(X, w_loc)\n', (31953, 31963), True, 'import numpy as np\n'), ((33306, 33338), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (33319, 33338), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((34569, 34578), 'numpy.abs', 'np.abs', (['V'], {}), '(V)\n', (34575, 34578), True, 'import numpy as np\n'), ((34813, 34834), 'numpy.matmul', 'np.matmul', (['Y.T', 'Z_old'], {}), '(Y.T, Z_old)\n', (34822, 34834), True, 'import numpy as np\n'), ((35218, 35238), 'numpy.matmul', 'np.matmul', (['Y', 'mu_new'], {}), '(Y, mu_new)\n', (35227, 35238), True, 'import numpy as np\n'), ((35241, 35260), 'numpy.matmul', 'np.matmul', (['X', 'w_new'], {}), '(X, w_new)\n', (35250, 35260), True, 'import numpy as np\n'), ((35283, 35317), 'numpy.linalg.norm', 'np.linalg.norm', (['(Ik - mu_new)', '"""fro"""'], {}), "(Ik - mu_new, 'fro')\n", (35297, 35317), True, 'import numpy as np\n'), ((36663, 36695), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (36676, 36695), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((38091, 38112), 'numpy.matmul', 'np.matmul', (['Y.T', 'Z_old'], {}), '(Y.T, Z_old)\n', (38100, 38112), True, 'import numpy as np\n'), ((38428, 38448), 'numpy.matmul', 'np.matmul', (['Y', 'mu_new'], {}), '(Y, mu_new)\n', (38437, 38448), True, 'import numpy as np\n'), ((38451, 38466), 'numpy.matmul', 'np.matmul', (['X', 'w'], {}), '(X, w)\n', (38460, 38466), True, 
'import numpy as np\n'), ((38493, 38527), 'numpy.linalg.norm', 'np.linalg.norm', (['(Ik - mu_new)', '"""fro"""'], {}), "(Ik - mu_new, 'fro')\n", (38507, 38527), True, 'import numpy as np\n'), ((42126, 42158), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (42139, 42158), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((57383, 57415), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (57396, 57415), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((64824, 64856), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'categories': '"""auto"""'}), "(categories='auto')\n", (64837, 64856), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((68897, 68902), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (68900, 68902), False, 'from sklearn.svm import SVC\n'), ((81186, 81191), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (81189, 81191), False, 'from sklearn.svm import SVC\n'), ((89966, 90020), 'numpy.linspace', 'np.linspace', (['(1)', 'niter', 'niter'], {'endpoint': '(True)', 'dtype': 'int'}), '(1, niter, niter, endpoint=True, dtype=int)\n', (89977, 90020), True, 'import numpy as np\n'), ((98307, 98361), 'numpy.linspace', 'np.linspace', (['(1)', 'niter', 'niter'], {'endpoint': '(True)', 'dtype': 'int'}), '(1, niter, niter, endpoint=True, dtype=int)\n', (98318, 98361), True, 'import numpy as np\n'), ((34993, 35009), 'numpy.matmul', 'np.matmul', (['Y', 'mu'], {}), '(Y, mu)\n', (35002, 35009), True, 'import numpy as np\n'), ((35012, 35027), 'numpy.matmul', 'np.matmul', (['X', 'w'], {}), '(X, w)\n', (35021, 35027), True, 'import numpy as np\n'), ((38220, 38236), 'numpy.matmul', 'np.matmul', (['Y', 'mu'], {}), '(Y, mu)\n', (38229, 38236), True, 'import numpy as np\n'), ((38239, 38254), 'numpy.matmul', 'np.matmul', (['X', 'w'], {}), '(X, w)\n', (38248, 38254), True, 'import numpy as np\n'), ((68116, 
68132), 'numpy.unique', 'np.unique', (['Ytest'], {}), '(Ytest)\n', (68125, 68132), True, 'import numpy as np\n'), ((70537, 70553), 'numpy.unique', 'np.unique', (['Ytest'], {}), '(Ytest)\n', (70546, 70553), True, 'import numpy as np\n'), ((83626, 83642), 'numpy.unique', 'np.unique', (['Ytest'], {}), '(Ytest)\n', (83635, 83642), True, 'import numpy as np\n'), ((1310, 1331), 'numpy.arange', 'np.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (1319, 1331), True, 'import numpy as np\n'), ((1241, 1255), 'numpy.absolute', 'np.absolute', (['y'], {}), '(y)\n', (1252, 1255), True, 'import numpy as np\n')] |
import numpy
from scipy.optimize import curve_fit, fsolve
import os, os.path
import matplotlib.pyplot as plt
from scipy.constants import pi
plt.style.use("science")
def fit_para(L, d, eps_2D):
return (eps_2D - 1) * d / L + 1
def fit_vert(L, d, eps_2D):
return 1 / (d / L * (1 / eps_2D - 1) + 1)
root = "../../data/distance/"
g = os.walk(root)
names = next(g)[1]
tags = {
"mos2": "MoS$_{2}$",
"mose2": "MoSe$_{2}$",
"mote2": "MoTe$_{2}$",
"ws2": "WS$_{2}$",
"wse2": "WSe$_{2}$",
"wte2": "WTe$_{2}$",
}
limits = {
"mos2": (4.98, 6.15),
"mose2": (5.60, 6.46),
"mote2": (6.12, 6.98),
"ws2": (5.00, 6.15),
"wse2": (5.42, 6.49),
"wte2": (6.33, 7.06),
}
# limits = {
# "mos2": (5.22, 6.15),
# "mose2": (5.73, 6.46),
# "mote2": (6.37, 6.98),
# "ws2": (5.20, 6.15),
# "wse2": (5.75, 6.49),
# "wte2": (6.38, 7.06),
# }
raw_data_para = dict()
raw_data_perp = dict()
fit_all_para = dict() # eps_2D, delta
fit_all_perp = dict() # eps_2D, delta
def combine_data():
for i, item in enumerate(g):
if "old" in item:
continue
for f in item[2]:
f_path = os.path.join(item[0], f)
if "agr" not in f_path:
continue
print(f_path)
data = numpy.genfromtxt(f_path,
delimiter=" "
# skip_header=297,
# skip_footer=2
)
L = data[:, 0]
eps_SL = data[:, 1]
if "par" in f_path:
raw_data_para[names[i]] = (L, eps_SL)
param, _ = curve_fit(fit_para, L[1:], eps_SL[1:],
p0=(5, 10),
bounds=((0.5, 1.0),
(10, 50))
)
fit_all_para[names[i]] = param
elif "perp" in f_path:
raw_data_perp[names[i]] = (L, eps_SL)
param, _ = curve_fit(fit_vert, L[1:], eps_SL[1:],
p0=(5, 10),
bounds=((0.5, 1.0),
(10, 50))
)
fit_all_perp[names[i]] = param
combine_data()
print(fit_all_para, fit_all_perp)
fig, ax = plt.subplots(1, 3, figsize=(8.5, 2.4))
def plot_diff():
# ax 0: diff
diff = []
name_disp = []
for n, name in tags.items():
delta_para, _ = fit_all_para[n]
delta_perp, _ = fit_all_perp[n]
name_disp.append("2H-" + name)
# diff.append(delta_para / delta_perp)
diff.append((delta_para - delta_perp) / delta_perp * 100)
ax[0].axhline(y=0, alpha=0.8)
ax[0].bar(range(len(name_disp)), diff, width=0.6)
ax[0].set_xticks(range(len(name_disp)))
ax[0].set_xticklabels(name_disp, rotation=-30)
ax[0].set_ylabel("Estimation Error (%) of $\\delta_{\\mathrm{2D}}$")
# ""between $\\delta_{\\mathrm{2D}}^{{\parallel}, \\mathrm{fit}}$ and $\\delta_{\\mathrm{2D}}^{\\perp, \\mathrm{fit}}$ ($\\mathrm{\\AA{}}$)")
def plot_type1():
# ax 1:
name_disp = []
for n, name in tags.items():
L, eps_para = raw_data_para[n]
L = L[6]
eps_para = eps_para[6]
delta_para, _ = fit_all_para[n]
# name_disp.append("2H-" + name)
dmin, dmax = limits[n]
dd_ = numpy.linspace(dmin, dmax, 256)
xx = (dd_ - dmin) / (dmax - dmin) # normalized xx
f = dd_ / L
eps_2D_para = (eps_para + f - 1) / f
x_mark = (delta_para - dmin) / (dmax - dmin)
print(name, delta_para, dmin, dmax)
f_p = delta_para / L
eps_2D = (eps_para + f_p - 1) / f_p
ax[1].plot(xx, eps_2D_para, label=name)
ax[1].plot(x_mark, eps_2D, "*")
for n, name in tags.items():
L, eps_perp = raw_data_perp[n]
L = L[6]
eps_perp = eps_perp[6]
delta_perp, _ = fit_all_perp[n]
name_disp.append("2H-" + name)
dmin, dmax = limits[n]
dd_ = numpy.linspace(dmin, dmax, 256)
xx = (dd_ - dmin) / (dmax - dmin) # normalized xx
f = dd_ / L
eps_2D_perp = f / (1 / eps_perp + f - 1)
x_mark = (delta_perp - dmin) / (dmax - dmin)
f_p = delta_perp / L
eps_2D = 1 / ((1 / eps_perp + f_p - 1) / f_p)
ax[2].plot(xx, eps_2D_perp, label=name)
ax[2].plot(x_mark, eps_2D, "*")
def plot_type2():
# ax 1:
name_disp = []
for n, name in tags.items():
L, eps_para = raw_data_para[n]
L = L[6]
eps_para = eps_para[6]
delta_para, _ = fit_all_para[n]
delta_perp, _ = fit_all_perp[n]
delta_avg = (delta_para + delta_perp) / 2
name_disp.append("2H-" + name)
# xx = numpy.linspace(-0.25, 0.25, 256)
# dd_ = xx + delta_avg
diff = 7.5
xx = numpy.linspace(-diff / 100, diff / 100, 256)
dd_ = (1 + xx) * delta_avg
f = dd_ / L
eps_2D_para = (eps_para + f - 1) / f
l, = ax[1].plot(xx * 100, eps_2D_para, label="2H-" + name)
ax[1].plot(xx[::20] * 100, eps_2D_para[::20], "o", markersize=4, color=l.get_c())
for n, name in tags.items():
L, eps_perp = raw_data_perp[n]
L = L[6]
eps_perp = eps_perp[6]
delta_perp, _ = fit_all_perp[n]
delta_para, _ = fit_all_para[n]
# delta_avg = (delta_para + delta_perp) / 2
delta_avg = delta_perp
name_disp.append("2H-" + name)
name_disp.append("2H-" + name)
diff = 7.5
xx = numpy.linspace(-diff / 100, diff / 100, 256)
dd_ = (1 + xx) * delta_avg
# xx = numpy.linspace(-0.25, 0.25, 256)
# dd_ = xx + delta_avg
f = dd_ / L
eps_2D_perp = f / (1 / eps_perp + f - 1)
cond = numpy.where((eps_2D_perp > 0) & (eps_2D_perp < 1000))
l, = ax[2].plot(xx[cond] * 100, eps_2D_perp[cond], label="2H-" + name)
ax[2].plot(xx[::20] * 100, eps_2D_perp[::20], "o", markersize=4, color=l.get_c())
# ax[1].set_ylim(14, 23)
ax[1].set_xlabel("Uncertainty of $\\delta^{*}_{\\mathrm{2D}}$ (%)")
ax[1].set_ylabel("Estimated $\\varepsilon_{\\mathrm{2D}}^{\\parallel}$")
ax[2].set_xlim(-7.5, 7.5)
ax[2].set_ylim(15, 500)
ax[2].set_xlabel("Uncertainty of $\\delta^{*}_{\\mathrm{2D}}$ (%)")
ax[2].set_ylabel("Estimated $\\varepsilon_{\\mathrm{2D}}^{\\perp}$")
plot_diff()
plot_type2()
# ax[0].set_ylabel("")
# ax[1].set_ylim(10, 25)
ax[1].legend()
# ax[2].set_yscale("log")
# ax[0].set_xticklabels(name_disp)
fig.tight_layout()
fig.savefig("../../tmp_img/emt_res.svg")
| [
"os.path.join",
"os.walk",
"numpy.genfromtxt",
"scipy.optimize.curve_fit",
"matplotlib.pyplot.style.use",
"numpy.where",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((140, 164), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""science"""'], {}), "('science')\n", (153, 164), True, 'import matplotlib.pyplot as plt\n'), ((341, 354), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (348, 354), False, 'import os, os.path\n'), ((2402, 2440), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(8.5, 2.4)'}), '(1, 3, figsize=(8.5, 2.4))\n', (2414, 2440), True, 'import matplotlib.pyplot as plt\n'), ((3466, 3497), 'numpy.linspace', 'numpy.linspace', (['dmin', 'dmax', '(256)'], {}), '(dmin, dmax, 256)\n', (3480, 3497), False, 'import numpy\n'), ((4147, 4178), 'numpy.linspace', 'numpy.linspace', (['dmin', 'dmax', '(256)'], {}), '(dmin, dmax, 256)\n', (4161, 4178), False, 'import numpy\n'), ((5001, 5045), 'numpy.linspace', 'numpy.linspace', (['(-diff / 100)', '(diff / 100)', '(256)'], {}), '(-diff / 100, diff / 100, 256)\n', (5015, 5045), False, 'import numpy\n'), ((5699, 5743), 'numpy.linspace', 'numpy.linspace', (['(-diff / 100)', '(diff / 100)', '(256)'], {}), '(-diff / 100, diff / 100, 256)\n', (5713, 5743), False, 'import numpy\n'), ((5942, 5995), 'numpy.where', 'numpy.where', (['((eps_2D_perp > 0) & (eps_2D_perp < 1000))'], {}), '((eps_2D_perp > 0) & (eps_2D_perp < 1000))\n', (5953, 5995), False, 'import numpy\n'), ((1194, 1218), 'os.path.join', 'os.path.join', (['item[0]', 'f'], {}), '(item[0], f)\n', (1206, 1218), False, 'import os, os.path\n'), ((1325, 1364), 'numpy.genfromtxt', 'numpy.genfromtxt', (['f_path'], {'delimiter': '""" """'}), "(f_path, delimiter=' ')\n", (1341, 1364), False, 'import numpy\n'), ((1694, 1779), 'scipy.optimize.curve_fit', 'curve_fit', (['fit_para', 'L[1:]', 'eps_SL[1:]'], {'p0': '(5, 10)', 'bounds': '((0.5, 1.0), (10, 50))'}), '(fit_para, L[1:], eps_SL[1:], p0=(5, 10), bounds=((0.5, 1.0), (10,\n 50)))\n', (1703, 1779), False, 'from scipy.optimize import curve_fit, fsolve\n'), ((2075, 2160), 'scipy.optimize.curve_fit', 'curve_fit', (['fit_vert', 'L[1:]', 'eps_SL[1:]'], 
{'p0': '(5, 10)', 'bounds': '((0.5, 1.0), (10, 50))'}), '(fit_vert, L[1:], eps_SL[1:], p0=(5, 10), bounds=((0.5, 1.0), (10,\n 50)))\n', (2084, 2160), False, 'from scipy.optimize import curve_fit, fsolve\n')] |
from nose.tools import raises
import os
import shutil
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import flopy
pthtest = os.path.join("..", "examples", "data", "mfgrd_test")
flowpth = os.path.join("..", "examples", "data", "mf6-freyberg")
tpth = os.path.join("temp", "t029")
# remove the directory if it exists
if os.path.isdir(tpth):
shutil.rmtree(tpth)
# make the directory
os.makedirs(tpth)
def test_mfgrddis_MfGrdFile():
fn = os.path.join(pthtest, "nwtp3.dis.grb")
grb = flopy.mf6.utils.MfGrdFile(fn, verbose=True)
nodes = grb.nodes
ia = grb.ia
shape = ia.shape[0]
assert shape == nodes + 1, "ia size ({}) not equal to {}".format(
shape, nodes + 1
)
nnz = ia[-1]
ja = grb.ja
shape = ja.shape[0]
assert shape == nnz, "ja size ({}) not equal to {}".format(shape, nnz)
modelgrid = grb.modelgrid
assert isinstance(
modelgrid, flopy.discretization.StructuredGrid
), "invalid grid type"
def test_mfgrddis_modelgrid():
fn = os.path.join(pthtest, "nwtp3.dis.grb")
modelgrid = flopy.discretization.StructuredGrid.from_binary_grid_file(
fn, verbose=True
)
assert isinstance(
modelgrid, flopy.discretization.StructuredGrid
), "invalid grid type"
lc = modelgrid.plot()
assert isinstance(
lc, matplotlib.collections.LineCollection
), "could not plot grid object created from {}".format(fn)
plt.close()
extents = modelgrid.extent
errmsg = (
"extents {} of {} ".format(extents, fn)
+ "does not equal (0.0, 8000.0, 0.0, 8000.0)"
)
assert extents == (0.0, 8000.0, 0.0, 8000.0), errmsg
ncpl = modelgrid.ncol * modelgrid.nrow
assert modelgrid.ncpl == ncpl, "ncpl ({}) does not equal {}".format(
modelgrid.ncpl, ncpl
)
nvert = modelgrid.nvert
iverts = modelgrid.iverts
maxvertex = max([max(sublist[1:]) for sublist in iverts])
assert maxvertex + 1 == nvert, "nvert ({}) does not equal {}".format(
maxvertex + 1, nvert
)
verts = modelgrid.verts
assert (
nvert == verts.shape[0]
), "number of vertex (x, y) pairs ({}) ".format(
verts.shape[0]
) + "does not equal {}".format(
nvert
)
def test_mfgrddisv_MfGrdFile():
fn = os.path.join(pthtest, "flow.disv.grb")
grb = flopy.mf6.utils.MfGrdFile(fn, verbose=True)
nodes = grb.nodes
ia = grb.ia
shape = ia.shape[0]
assert shape == nodes + 1, "ia size ({}) not equal to {}".format(
shape, nodes + 1
)
nnz = ia[-1]
ja = grb.ja
shape = ja.shape[0]
assert shape == nnz, "ja size ({}) not equal to {}".format(shape, nnz)
mg = grb.modelgrid
assert isinstance(
mg, flopy.discretization.VertexGrid
), "invalid grid type ({})".format(type(mg))
def test_mfgrddisv_modelgrid():
fn = os.path.join(pthtest, "flow.disv.grb")
mg = flopy.discretization.VertexGrid.from_binary_grid_file(
fn, verbose=True
)
assert isinstance(
mg, flopy.discretization.VertexGrid
), "invalid grid type ({})".format(type(mg))
ncpl = 218
assert mg.ncpl == ncpl, "ncpl ({}) does not equal {}".format(mg.ncpl, ncpl)
lc = mg.plot()
assert isinstance(
lc, matplotlib.collections.LineCollection
), "could not plot grid object created from {}".format(fn)
plt.close("all")
extents = mg.extent
extents0 = (0.0, 700.0, 0.0, 700.0)
errmsg = "extents {} of {} ".format(
extents, fn
) + "does not equal {}".format(extents0)
assert extents == extents0, errmsg
nvert = mg.nvert
iverts = mg.iverts
maxvertex = max([max(sublist[1:]) for sublist in iverts])
assert maxvertex + 1 == nvert, "nvert ({}) does not equal {}".format(
maxvertex + 1, nvert
)
verts = mg.verts
assert (
nvert == verts.shape[0]
), "number of vertex (x, y) pairs ({}) ".format(
verts.shape[0]
) + "does not equal {}".format(
nvert
)
cellxy = np.column_stack((mg.xyzcellcenters[:2]))
errmsg = "shape of flow.disv centroids {} not equal to (218, 2).".format(
cellxy.shape
)
assert cellxy.shape == (218, 2), errmsg
return
def test_mfgrddisu_MfGrdFile():
fn = os.path.join(pthtest, "keating.disu.grb")
grb = flopy.mf6.utils.MfGrdFile(fn, verbose=True)
nodes = grb.nodes
ia = grb.ia
shape = ia.shape[0]
assert shape == nodes + 1, "ia size ({}) not equal to {}".format(
shape, nodes + 1
)
nnz = ia[-1]
ja = grb.ja
shape = ja.shape[0]
assert shape == nnz, "ja size ({}) not equal to {}".format(shape, nnz)
mg = grb.modelgrid
assert isinstance(
mg, flopy.discretization.UnstructuredGrid
), "invalid grid type ({})".format(type(mg))
@raises(TypeError)
def test_mfgrddisu_modelgrid_fail():
fn = os.path.join(pthtest, "flow.disu.grb")
mg = flopy.discretization.UnstructuredGrid.from_binary_grid_file(
fn, verbose=True
)
def test_mfgrddisu_modelgrid():
fn = os.path.join(pthtest, "keating.disu.grb")
mg = flopy.discretization.UnstructuredGrid.from_binary_grid_file(
fn, verbose=True
)
assert isinstance(
mg, flopy.discretization.UnstructuredGrid
), "invalid grid type ({})".format(type(mg))
lc = mg.plot()
assert isinstance(
lc, matplotlib.collections.LineCollection
), "could not plot grid object created from {}".format(fn)
plt.close("all")
extents = mg.extent
extents0 = (0.0, 10000.0, 0.0, 1.0)
errmsg = "extents {} of {} ".format(
extents, fn
) + "does not equal {}".format(extents0)
assert extents == extents0, errmsg
nvert = mg.nvert
iverts = mg.iverts
maxvertex = max([max(sublist[1:]) for sublist in iverts])
assert maxvertex + 1 == nvert, "nvert ({}) does not equal {}".format(
maxvertex + 1, nvert
)
verts = mg.verts
assert (
nvert == verts.shape[0]
), "number of vertex (x, y) pairs ({}) ".format(
verts.shape[0]
) + "does not equal {}".format(
nvert
)
return
def test_faceflows():
sim = flopy.mf6.MFSimulation.load(
sim_name="freyberg",
exe_name="mf6",
sim_ws=flowpth,
)
# change the simulation workspace
sim.set_sim_path(tpth)
# write the model simulation files
sim.write_simulation()
# run the simulation
sim.run_simulation()
# get output
gwf = sim.get_model("freyberg")
head = gwf.output.head().get_data()
cbc = gwf.output.budget()
spdis = cbc.get_data(text="DATA-SPDIS")[0]
flowja = cbc.get_data(text="FLOW-JA-FACE")[0]
frf, fff, flf = flopy.mf6.utils.get_structured_faceflows(
flowja,
grb_file=os.path.join(tpth, "freyberg.dis.grb"),
)
Qx, Qy, Qz = flopy.utils.postprocessing.get_specific_discharge(
(frf, fff, flf),
gwf,
)
sqx, sqy, sqz = flopy.utils.postprocessing.get_specific_discharge(
(frf, fff, flf),
gwf,
head=head,
)
qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(spdis, gwf)
fig = plt.figure(figsize=(12, 6), constrained_layout=True)
ax = fig.add_subplot(1, 3, 1, aspect="equal")
mm = flopy.plot.PlotMapView(model=gwf, ax=ax)
Q0 = mm.plot_vector(Qx, Qy)
assert isinstance(
Q0, matplotlib.quiver.Quiver
), "Q0 not type matplotlib.quiver.Quiver"
ax = fig.add_subplot(1, 3, 2, aspect="equal")
mm = flopy.plot.PlotMapView(model=gwf, ax=ax)
q0 = mm.plot_vector(sqx, sqy)
assert isinstance(
q0, matplotlib.quiver.Quiver
), "q0 not type matplotlib.quiver.Quiver"
ax = fig.add_subplot(1, 3, 3, aspect="equal")
mm = flopy.plot.PlotMapView(model=gwf, ax=ax)
q1 = mm.plot_vector(qx, qy)
assert isinstance(
q1, matplotlib.quiver.Quiver
), "q1 not type matplotlib.quiver.Quiver"
plt.close("all")
# uv0 = np.column_stack((q0.U, q0.V))
# uv1 = np.column_stack((q1.U, q1.V))
# diff = uv1 - uv0
# assert (
# np.allclose(uv0, uv1)
# ), "get_faceflows quivers are not equal to specific discharge vectors"
return
def test_flowja_residuals():
sim = flopy.mf6.MFSimulation.load(
sim_name="freyberg",
exe_name="mf6",
sim_ws=tpth,
)
# get output
gwf = sim.get_model("freyberg")
grb_file = os.path.join(tpth, "freyberg.dis.grb")
cbc = gwf.output.budget()
spdis = cbc.get_data(text="DATA-SPDIS")[0]
flowja = cbc.get_data(text="FLOW-JA-FACE")[0]
residual = flopy.mf6.utils.get_residuals(flowja, grb_file=grb_file)
qx, qy, qz = flopy.utils.postprocessing.get_specific_discharge(spdis, gwf)
fig = plt.figure(figsize=(6, 9), constrained_layout=True)
ax = fig.add_subplot(1, 1, 1, aspect="equal")
mm = flopy.plot.PlotMapView(model=gwf, ax=ax)
r0 = mm.plot_array(residual)
assert isinstance(
r0, matplotlib.collections.QuadMesh
), "r0 not type matplotlib.collections.QuadMesh"
q0 = mm.plot_vector(qx, qy)
assert isinstance(
q0, matplotlib.quiver.Quiver
), "q0 not type matplotlib.quiver.Quiver"
mm.plot_grid(lw=0.5, color="black")
mm.plot_ibound()
plt.colorbar(r0, shrink=0.5)
plt.title("Cell Residual, cubic meters per second")
plt.close("all")
return
@raises(ValueError)
def test_faceflows_empty():
flowja = np.zeros(10, dtype=np.float64)
frf, fff, flf = flopy.mf6.utils.get_structured_faceflows(flowja)
@raises(ValueError)
def test_faceflows_jaempty():
flowja = np.zeros(10, dtype=np.float64)
ia = np.zeros(10, dtype=np.int32)
frf, fff, flf = flopy.mf6.utils.get_structured_faceflows(flowja, ia=ia)
@raises(ValueError)
def test_faceflows_iaempty():
flowja = np.zeros(10, dtype=np.float64)
ja = np.zeros(10, dtype=np.int32)
_v = flopy.mf6.utils.get_structured_faceflows(flowja, ja=ja)
@raises(ValueError)
def test_faceflows_flowja_size():
flowja = np.zeros(10, dtype=np.float64)
ia = np.zeros(5, dtype=np.int32)
ja = np.zeros(5, dtype=np.int32)
_v = flopy.mf6.utils.get_structured_faceflows(flowja, ia=ia, ja=ja)
@raises(ValueError)
def test_residuals_jaempty():
flowja = np.zeros(10, dtype=np.float64)
ia = np.zeros(10, dtype=np.int32)
_v = flopy.mf6.utils.get_residuals(flowja, ia=ia)
@raises(ValueError)
def test_residuals_iaempty():
flowja = np.zeros(10, dtype=np.float64)
ja = np.zeros(10, dtype=np.int32)
_v = flopy.mf6.utils.get_residuals(flowja, ja=ja)
if __name__ == "__main__":
# test_mfgrddis_MfGrdFile()
# test_mfgrddis_modelgrid()
# test_mfgrddisv_MfGrdFile()
# test_mfgrddisv_modelgrid()
# test_mfgrddisu_MfGrdFile()
# test_mfgrddisu_modelgrid()
test_faceflows()
test_flowja_residuals()
| [
"matplotlib.pyplot.title",
"flopy.mf6.utils.MfGrdFile",
"matplotlib.pyplot.figure",
"shutil.rmtree",
"os.path.join",
"flopy.mf6.MFSimulation.load",
"nose.tools.raises",
"flopy.mf6.utils.get_structured_faceflows",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"flopy.discretization.Stru... | [((149, 201), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""data"""', '"""mfgrd_test"""'], {}), "('..', 'examples', 'data', 'mfgrd_test')\n", (161, 201), False, 'import os\n'), ((212, 266), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""data"""', '"""mf6-freyberg"""'], {}), "('..', 'examples', 'data', 'mf6-freyberg')\n", (224, 266), False, 'import os\n'), ((275, 303), 'os.path.join', 'os.path.join', (['"""temp"""', '"""t029"""'], {}), "('temp', 't029')\n", (287, 303), False, 'import os\n'), ((343, 362), 'os.path.isdir', 'os.path.isdir', (['tpth'], {}), '(tpth)\n', (356, 362), False, 'import os\n'), ((409, 426), 'os.makedirs', 'os.makedirs', (['tpth'], {}), '(tpth)\n', (420, 426), False, 'import os\n'), ((4828, 4845), 'nose.tools.raises', 'raises', (['TypeError'], {}), '(TypeError)\n', (4834, 4845), False, 'from nose.tools import raises\n'), ((9398, 9416), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (9404, 9416), False, 'from nose.tools import raises\n'), ((9561, 9579), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (9567, 9579), False, 'from nose.tools import raises\n'), ((9771, 9789), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (9777, 9789), False, 'from nose.tools import raises\n'), ((9970, 9988), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (9976, 9988), False, 'from nose.tools import raises\n'), ((10216, 10234), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (10222, 10234), False, 'from nose.tools import raises\n'), ((10404, 10422), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (10410, 10422), False, 'from nose.tools import raises\n'), ((368, 387), 'shutil.rmtree', 'shutil.rmtree', (['tpth'], {}), '(tpth)\n', (381, 387), False, 'import shutil\n'), ((469, 507), 'os.path.join', 'os.path.join', (['pthtest', '"""nwtp3.dis.grb"""'], {}), 
"(pthtest, 'nwtp3.dis.grb')\n", (481, 507), False, 'import os\n'), ((518, 561), 'flopy.mf6.utils.MfGrdFile', 'flopy.mf6.utils.MfGrdFile', (['fn'], {'verbose': '(True)'}), '(fn, verbose=True)\n', (543, 561), False, 'import flopy\n'), ((1037, 1075), 'os.path.join', 'os.path.join', (['pthtest', '"""nwtp3.dis.grb"""'], {}), "(pthtest, 'nwtp3.dis.grb')\n", (1049, 1075), False, 'import os\n'), ((1092, 1167), 'flopy.discretization.StructuredGrid.from_binary_grid_file', 'flopy.discretization.StructuredGrid.from_binary_grid_file', (['fn'], {'verbose': '(True)'}), '(fn, verbose=True)\n', (1149, 1167), False, 'import flopy\n'), ((1454, 1465), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1463, 1465), True, 'import matplotlib.pyplot as plt\n'), ((2308, 2346), 'os.path.join', 'os.path.join', (['pthtest', '"""flow.disv.grb"""'], {}), "(pthtest, 'flow.disv.grb')\n", (2320, 2346), False, 'import os\n'), ((2357, 2400), 'flopy.mf6.utils.MfGrdFile', 'flopy.mf6.utils.MfGrdFile', (['fn'], {'verbose': '(True)'}), '(fn, verbose=True)\n', (2382, 2400), False, 'import flopy\n'), ((2881, 2919), 'os.path.join', 'os.path.join', (['pthtest', '"""flow.disv.grb"""'], {}), "(pthtest, 'flow.disv.grb')\n", (2893, 2919), False, 'import os\n'), ((2929, 3000), 'flopy.discretization.VertexGrid.from_binary_grid_file', 'flopy.discretization.VertexGrid.from_binary_grid_file', (['fn'], {'verbose': '(True)'}), '(fn, verbose=True)\n', (2982, 3000), False, 'import flopy\n'), ((3387, 3403), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3396, 3403), True, 'import matplotlib.pyplot as plt\n'), ((4042, 4080), 'numpy.column_stack', 'np.column_stack', (['mg.xyzcellcenters[:2]'], {}), '(mg.xyzcellcenters[:2])\n', (4057, 4080), True, 'import numpy as np\n'), ((4286, 4327), 'os.path.join', 'os.path.join', (['pthtest', '"""keating.disu.grb"""'], {}), "(pthtest, 'keating.disu.grb')\n", (4298, 4327), False, 'import os\n'), ((4338, 4381), 'flopy.mf6.utils.MfGrdFile', 
'flopy.mf6.utils.MfGrdFile', (['fn'], {'verbose': '(True)'}), '(fn, verbose=True)\n', (4363, 4381), False, 'import flopy\n'), ((4892, 4930), 'os.path.join', 'os.path.join', (['pthtest', '"""flow.disu.grb"""'], {}), "(pthtest, 'flow.disu.grb')\n", (4904, 4930), False, 'import os\n'), ((4940, 5017), 'flopy.discretization.UnstructuredGrid.from_binary_grid_file', 'flopy.discretization.UnstructuredGrid.from_binary_grid_file', (['fn'], {'verbose': '(True)'}), '(fn, verbose=True)\n', (4999, 5017), False, 'import flopy\n'), ((5075, 5116), 'os.path.join', 'os.path.join', (['pthtest', '"""keating.disu.grb"""'], {}), "(pthtest, 'keating.disu.grb')\n", (5087, 5116), False, 'import os\n'), ((5126, 5203), 'flopy.discretization.UnstructuredGrid.from_binary_grid_file', 'flopy.discretization.UnstructuredGrid.from_binary_grid_file', (['fn'], {'verbose': '(True)'}), '(fn, verbose=True)\n', (5185, 5203), False, 'import flopy\n'), ((5500, 5516), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5509, 5516), True, 'import matplotlib.pyplot as plt\n'), ((6187, 6272), 'flopy.mf6.MFSimulation.load', 'flopy.mf6.MFSimulation.load', ([], {'sim_name': '"""freyberg"""', 'exe_name': '"""mf6"""', 'sim_ws': 'flowpth'}), "(sim_name='freyberg', exe_name='mf6', sim_ws=flowpth\n )\n", (6214, 6272), False, 'import flopy\n'), ((6864, 6935), 'flopy.utils.postprocessing.get_specific_discharge', 'flopy.utils.postprocessing.get_specific_discharge', (['(frf, fff, flf)', 'gwf'], {}), '((frf, fff, flf), gwf)\n', (6913, 6935), False, 'import flopy\n'), ((6979, 7065), 'flopy.utils.postprocessing.get_specific_discharge', 'flopy.utils.postprocessing.get_specific_discharge', (['(frf, fff, flf)', 'gwf'], {'head': 'head'}), '((frf, fff, flf), gwf,\n head=head)\n', (7028, 7065), False, 'import flopy\n'), ((7110, 7171), 'flopy.utils.postprocessing.get_specific_discharge', 'flopy.utils.postprocessing.get_specific_discharge', (['spdis', 'gwf'], {}), '(spdis, gwf)\n', (7159, 7171), False, 'import 
flopy\n'), ((7183, 7235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)', 'constrained_layout': '(True)'}), '(figsize=(12, 6), constrained_layout=True)\n', (7193, 7235), True, 'import matplotlib.pyplot as plt\n'), ((7295, 7335), 'flopy.plot.PlotMapView', 'flopy.plot.PlotMapView', ([], {'model': 'gwf', 'ax': 'ax'}), '(model=gwf, ax=ax)\n', (7317, 7335), False, 'import flopy\n'), ((7534, 7574), 'flopy.plot.PlotMapView', 'flopy.plot.PlotMapView', ([], {'model': 'gwf', 'ax': 'ax'}), '(model=gwf, ax=ax)\n', (7556, 7574), False, 'import flopy\n'), ((7775, 7815), 'flopy.plot.PlotMapView', 'flopy.plot.PlotMapView', ([], {'model': 'gwf', 'ax': 'ax'}), '(model=gwf, ax=ax)\n', (7797, 7815), False, 'import flopy\n'), ((7959, 7975), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (7968, 7975), True, 'import matplotlib.pyplot as plt\n'), ((8261, 8338), 'flopy.mf6.MFSimulation.load', 'flopy.mf6.MFSimulation.load', ([], {'sim_name': '"""freyberg"""', 'exe_name': '"""mf6"""', 'sim_ws': 'tpth'}), "(sim_name='freyberg', exe_name='mf6', sim_ws=tpth)\n", (8288, 8338), False, 'import flopy\n'), ((8439, 8477), 'os.path.join', 'os.path.join', (['tpth', '"""freyberg.dis.grb"""'], {}), "(tpth, 'freyberg.dis.grb')\n", (8451, 8477), False, 'import os\n'), ((8622, 8678), 'flopy.mf6.utils.get_residuals', 'flopy.mf6.utils.get_residuals', (['flowja'], {'grb_file': 'grb_file'}), '(flowja, grb_file=grb_file)\n', (8651, 8678), False, 'import flopy\n'), ((8696, 8757), 'flopy.utils.postprocessing.get_specific_discharge', 'flopy.utils.postprocessing.get_specific_discharge', (['spdis', 'gwf'], {}), '(spdis, gwf)\n', (8745, 8757), False, 'import flopy\n'), ((8769, 8820), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 9)', 'constrained_layout': '(True)'}), '(figsize=(6, 9), constrained_layout=True)\n', (8779, 8820), True, 'import matplotlib.pyplot as plt\n'), ((8880, 8920), 'flopy.plot.PlotMapView', 'flopy.plot.PlotMapView', ([], {'model': 
'gwf', 'ax': 'ax'}), '(model=gwf, ax=ax)\n', (8902, 8920), False, 'import flopy\n'), ((9277, 9305), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['r0'], {'shrink': '(0.5)'}), '(r0, shrink=0.5)\n', (9289, 9305), True, 'import matplotlib.pyplot as plt\n'), ((9310, 9361), 'matplotlib.pyplot.title', 'plt.title', (['"""Cell Residual, cubic meters per second"""'], {}), "('Cell Residual, cubic meters per second')\n", (9319, 9361), True, 'import matplotlib.pyplot as plt\n'), ((9367, 9383), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9376, 9383), True, 'import matplotlib.pyplot as plt\n'), ((9458, 9488), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (9466, 9488), True, 'import numpy as np\n'), ((9509, 9557), 'flopy.mf6.utils.get_structured_faceflows', 'flopy.mf6.utils.get_structured_faceflows', (['flowja'], {}), '(flowja)\n', (9549, 9557), False, 'import flopy\n'), ((9623, 9653), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (9631, 9653), True, 'import numpy as np\n'), ((9663, 9691), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.int32'}), '(10, dtype=np.int32)\n', (9671, 9691), True, 'import numpy as np\n'), ((9712, 9767), 'flopy.mf6.utils.get_structured_faceflows', 'flopy.mf6.utils.get_structured_faceflows', (['flowja'], {'ia': 'ia'}), '(flowja, ia=ia)\n', (9752, 9767), False, 'import flopy\n'), ((9833, 9863), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (9841, 9863), True, 'import numpy as np\n'), ((9873, 9901), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.int32'}), '(10, dtype=np.int32)\n', (9881, 9901), True, 'import numpy as np\n'), ((9911, 9966), 'flopy.mf6.utils.get_structured_faceflows', 'flopy.mf6.utils.get_structured_faceflows', (['flowja'], {'ja': 'ja'}), '(flowja, ja=ja)\n', (9951, 9966), False, 'import flopy\n'), ((10036, 10066), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 
'np.float64'}), '(10, dtype=np.float64)\n', (10044, 10066), True, 'import numpy as np\n'), ((10076, 10103), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'np.int32'}), '(5, dtype=np.int32)\n', (10084, 10103), True, 'import numpy as np\n'), ((10113, 10140), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'np.int32'}), '(5, dtype=np.int32)\n', (10121, 10140), True, 'import numpy as np\n'), ((10150, 10212), 'flopy.mf6.utils.get_structured_faceflows', 'flopy.mf6.utils.get_structured_faceflows', (['flowja'], {'ia': 'ia', 'ja': 'ja'}), '(flowja, ia=ia, ja=ja)\n', (10190, 10212), False, 'import flopy\n'), ((10278, 10308), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (10286, 10308), True, 'import numpy as np\n'), ((10318, 10346), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.int32'}), '(10, dtype=np.int32)\n', (10326, 10346), True, 'import numpy as np\n'), ((10356, 10400), 'flopy.mf6.utils.get_residuals', 'flopy.mf6.utils.get_residuals', (['flowja'], {'ia': 'ia'}), '(flowja, ia=ia)\n', (10385, 10400), False, 'import flopy\n'), ((10466, 10496), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (10474, 10496), True, 'import numpy as np\n'), ((10506, 10534), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'np.int32'}), '(10, dtype=np.int32)\n', (10514, 10534), True, 'import numpy as np\n'), ((10544, 10588), 'flopy.mf6.utils.get_residuals', 'flopy.mf6.utils.get_residuals', (['flowja'], {'ja': 'ja'}), '(flowja, ja=ja)\n', (10573, 10588), False, 'import flopy\n'), ((6801, 6839), 'os.path.join', 'os.path.join', (['tpth', '"""freyberg.dis.grb"""'], {}), "(tpth, 'freyberg.dis.grb')\n", (6813, 6839), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
## Copyright 2015-2021 PyPSA Developers
## You can find the list of PyPSA Developers at
## https://pypsa.readthedocs.io/en/latest/developers.html
## PyPSA is released under the open source MIT License, see
## https://github.com/PyPSA/PyPSA/blob/master/LICENSE.txt
"""
Tools for fast Linear Problem file writing. This module contains
- io functions for writing out variables, constraints and objective
into a lp file.
- functions to create lp format based linear expression
- solver functions which read the lp file, run the problem and return the
solution
This module supports the linear optimal power flow calculation without using
pyomo (see module linopt.py)
"""
__author__ = "PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html"
__copyright__ = ("Copyright 2015-2021 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html, "
"MIT License")
from .descriptors import Dict
import pandas as pd
import os
import logging, re, io, subprocess
import numpy as np
from pandas import IndexSlice as idx
from importlib.util import find_spec
from distutils.version import LooseVersion
logger = logging.getLogger(__name__)
# =============================================================================
# Front end functions
# =============================================================================
def define_variables(n, lower, upper, name, attr='', axes=None, spec='', mask=None):
    """
    Add new optimization variable(s) to the network, bounded from below by
    ``lower`` and from above by ``upper``.

    The created variable references are stored on the network under
    ``n.vars[name]`` — in ``.pnl`` if two-dimensional, in ``.df`` if
    one-dimensional — and can later be retrieved with
    :func:`get_var(n, name, attr)`. When multiple variables are defined at
    once, at least one of ``lower``/``upper`` must be an array-like of shape
    > (1,), or ``axes`` must define the dimensions.

    Parameters
    ----------
    n : pypsa.Network
    lower : pd.Series/pd.DataFrame/np.array/str/float
        Lower bound(s) of the variable(s).
    upper : pd.Series/pd.DataFrame/np.array/str/float
        Upper bound(s) of the variable(s).
    name : str
        General name of the variable (or the component it refers to).
    attr : str, default ''
        Specific name under which the references are stored in
        ``n.vars[name].pnl`` (2-d) or ``n.vars[name].df`` (1-d).
    axes : pd.Index or tuple of pd.Index objects, default None
        Axes (and therefore shape) of the variables when the bounds are
        plain strings or floats.
    spec : str, default ''
        Additional specification recorded in the bookkeeping table.
    mask : pd.DataFrame/np.array, optional
        Boolean mask; entries that are False are skipped. Must match the
        shape of the added variables.

    Example
    -------
    Define a demand-side-managed load between 0 and 10 at every bus:

    >>> from pypsa.linopt import define_variables, get_var
    >>> lb = pd.DataFrame(0, index=n.snapshots, columns=n.buses.index)
    >>> ub = pd.DataFrame(10, index=n.snapshots, columns=n.buses.index)
    >>> define_variables(n, lb, ub, 'DSM', 'variableload')

    Retrieve the references later with

    >>> variables = get_var(n, 'DSM', 'variableload')

    This is particularly useful in an `extra_functionality` hook.
    """
    refs = write_bound(n, lower, upper, axes, mask)
    set_varref(n, refs, name, attr, spec=spec)
    return refs
def define_binaries(n, axes, name, attr='', spec='', mask=None):
    """
    Add binary variable(s) to the network, one per entry of the pd.Series or
    pd.DataFrame spanned by ``axes``.

    The references are stored under ``n.vars[name]`` — in ``.pnl`` if
    two-dimensional, in ``.df`` if one-dimensional.

    Parameters
    ----------
    n : pypsa.Network
    axes : pd.Index or tuple of pd.Index objects
        Axes (and therefore shape) of the binaries.
    name : str
        General name of the variable (or the component it refers to).
    attr : str, default ''
        Specific name under which the references are stored in
        ``n.vars[name].pnl`` (2-d) or ``n.vars[name].df`` (1-d).
    spec : str, default ''
        Additional specification recorded in the bookkeeping table.
    mask : pd.DataFrame/np.array, optional
        Boolean mask with False values for binaries which are skipped.
        The shape of the mask has to match the shape given by axes.

    See also
    --------
    define_variables
    """
    # Fix: the mask was previously accepted but never forwarded, so masked
    # entries were written out regardless. write_binary supports it directly.
    var = write_binary(n, axes, mask)
    set_varref(n, var, name, attr, spec=spec)
    return var
def define_constraints(n, lhs, sense, rhs, name, attr='', axes=None, spec='',
                       mask=None):
    """
    Add constraint(s) ``lhs <sense> rhs`` to the network.

    The constraint references are stored under ``n.cons[name]`` — in
    ``.pnl`` if two-dimensional, in ``.df`` if one-dimensional. If all of
    ``lhs``/``sense``/``rhs`` are plain numpy arrays or scalars, ``axes``
    must describe the constraint dimensions (recommended for time-dependent
    constraints); if any of them is a pandas object, ``axes`` is inferred.

    Parameters
    ----------
    n : pypsa.Network
    lhs : pd.Series/pd.DataFrame/np.array/str/float
        Left hand side, created with :func:`pypsa.linopt.linexpr`.
    sense : pd.Series/pd.DataFrame/np.array/str/float
        Sense(s) of the constraint(s).
    rhs : pd.Series/pd.DataFrame/np.array/str/float
        Right hand side; must contain only constants, no variables.
    name : str
        General name of the constraint (or the component it refers to).
    attr : str, default ''
        Specific name under which the references are stored in
        ``n.cons[name].pnl`` (2-d) or ``n.cons[name].df`` (1-d).
    axes : pd.Index or tuple of pd.Index objects, default None
        Axes when lhs/sense/rhs carry no pandas axes of their own.
    spec : str, default ''
        Additional specification recorded in the bookkeeping table.
    mask : pd.DataFrame/np.array, optional
        Boolean mask; entries that are False are skipped. Must match the
        broadcast shape of lhs, sense and rhs.

    Example
    -------
    Limit all gas generators to 100 MWh over the first ten snapshots:

    >>> from pypsa.linopt import get_var, linexpr, define_constraints
    >>> gas_i = n.generators.query('carrier == "Natural Gas"').index
    >>> gas_vars = get_var(n, 'Generator', 'p').loc[n.snapshots[:10], gas_i]
    >>> lhs = linexpr((1, gas_vars)).sum().sum()
    >>> define_constraints(n, lhs, '<=', 100, 'Generator', 'gas_power_limit')

    The references end up in ``n.cons.Generator.pnl.gas_power_limit`` and can
    be retrieved with :func:`pypsa.linopt.get_con`. To obtain shadow prices,
    add the general constraint name to the ``keep_shadowprices`` argument.
    This is particularly useful in an `extra_functionality` hook.
    """
    refs = write_constraint(n, lhs, sense, rhs, axes, mask)
    set_conref(n, refs, name, attr, spec=spec)
    return refs
# =============================================================================
# writing functions
# =============================================================================
def _get_handlers(axes, *maybearrays):
axes = [axes] if isinstance(axes, pd.Index) else axes
if axes is None:
axes, shape = broadcasted_axes(*maybearrays)
else:
shape = tuple(map(len, axes))
size = np.prod(shape)
return axes, shape, size
def write_bound(n, lower, upper, axes=None, mask=None):
    """
    Write out bound lines ``lower <= x<i> <= upper`` for a batch of new
    variables and return their references as a pandas object.

    If ``lower`` and ``upper`` are plain floats, ``axes`` must be passed as
    a tuple of (index, columns) or (index,) to define the variables' shape.
    Masked entries are written as empty strings and referenced as -1.
    """
    axes, shape, size = _get_handlers(axes, lower, upper)
    if not size:
        return pd.Series(dtype=float)
    n._xCounter += size
    refs = np.arange(n._xCounter - size, n._xCounter).reshape(shape)
    lo = _str_array(lower)
    up = _str_array(upper)
    exprs = lo + ' <= x' + _str_array(refs, True) + ' <= ' + up + '\n'
    if mask is not None:
        exprs = np.where(mask, exprs, '')
        refs = np.where(mask, refs, -1)
    n.bounds_f.write(join_exprs(exprs))
    return to_pandas(refs, *axes)
def write_constraint(n, lhs, sense, rhs, axes=None, mask=None):
    """
    Write out a batch of constraints to the constraints file and return
    their references as a pandas object.

    If lhs, sense and rhs are plain numpy arrays, ``axes`` must be a tuple
    of (index, columns) or (index,). Masked entries are written as empty
    strings and referenced as -1.
    """
    axes, shape, size = _get_handlers(axes, lhs, sense, rhs)
    if not size:
        return pd.Series()
    n._cCounter += size
    refs = np.arange(n._cCounter - size, n._cCounter).reshape(shape)
    # the LP format spells equality as '=' rather than '=='
    if isinstance(sense, str) and sense == '==':
        sense = '='
    lhs = _str_array(lhs)
    sense = _str_array(sense)
    rhs = _str_array(rhs)
    exprs = 'c' + _str_array(refs, True) + ':\n' + lhs + sense + ' ' + rhs + '\n\n'
    if mask is not None:
        exprs = np.where(mask, exprs, '')
        refs = np.where(mask, refs, -1)
    n.constraints_f.write(join_exprs(exprs))
    return to_pandas(refs, *axes)
def write_binary(n, axes, mask=None):
    """
    Write out one binary variable per entry of the pd.Series or pd.DataFrame
    spanned by ``axes`` and return the references as a pandas object.
    Masked entries are written as empty strings and referenced as -1.
    """
    axes, shape, size = _get_handlers(axes)
    n._xCounter += size
    refs = np.arange(n._xCounter - size, n._xCounter).reshape(shape)
    exprs = 'x' + _str_array(refs, True) + '\n'
    if mask is not None:
        exprs = np.where(mask, exprs, '')
        refs = np.where(mask, refs, -1)
    n.binaries_f.write(join_exprs(exprs))
    return to_pandas(refs, *axes)
def write_objective(n, terms):
    """
    Append one or several terms to the objective function file.

    Parameters
    ----------
    n : pypsa.Network
    terms : str/numpy.array/pandas.Series/pandas.DataFrame
        Objective term(s), typically built with :func:`linexpr`.
    """
    n.objective_f.write(join_exprs(terms))
# =============================================================================
# helpers, helper functions
# =============================================================================
def broadcasted_axes(*dfs):
    """
    Determine the pandas axes and the numpy shape that result from
    broadcasting the given scalars, arrays, series and frames against each
    other.

    Indexes (and columns) of all pandas inputs must already be aligned; this
    allows subsequent computations to run on plain numpy arrays while the
    axes are kept in the background.
    """
    axes = []
    shape = (1,)

    # linexpr passes a collection of (coeff, var) tuples; flatten it first.
    if set(map(type, dfs)) == {tuple}:
        dfs = sum(dfs, ())

    for obj in dfs:
        shape = np.broadcast_shapes(shape, np.asarray(obj).shape)
        if isinstance(obj, (pd.Series, pd.DataFrame)):
            if len(axes):
                assert (axes[-1] == obj.axes[-1]).all(), (
                    'Series or DataFrames are not aligned. Please make sure '
                    'that all indexes and columns of Series and DataFrames '
                    'going into the linear expression are equally sorted.')
            if len(obj.axes) > len(axes):
                axes = obj.axes
    return axes, shape
def align_with_static_component(n, c, attr):
    """
    Reorder the columns of the time-dependent variable ``attr`` of component
    ``c`` to match the row order of the component's static DataFrame.
    No-op when the component/attribute is unknown, static, or when the
    column count does not match the static index.
    """
    if c not in n.all_components or (c, attr) not in n.variables.index:
        return
    if not n.variables.pnl[c, attr]:
        return
    static_index = n.df(c).index
    if len(n.vars[c].pnl[attr].columns) != len(static_index):
        return
    n.vars[c].pnl[attr] = n.vars[c].pnl[attr].reindex(columns=static_index)
def linexpr(*tuples, as_pandas=True, return_axes=False):
    """
    Elementwise build of LP expression strings from (coefficient, variables)
    tuples.

    Coefficients and variables may be scalars, numpy arrays, series or
    frames; they are broadcast against each other. By default a
    pandas.Series (1-d) or pandas.DataFrame (2-d) of strings is returned.

    Parameters
    ----------
    tuples : tuple of tuples
        Each tuple is (coeff, var): coeff a numerical value/array/series/
        frame, var a string or array/series/frame of variable references.
    as_pandas : bool, default True
        Return the result as a pandas object. Superseded by ``return_axes``.
    return_axes : bool, default False
        Return the raw values together with their axes,
        as ``(values, *axes)``.

    Example
    -------
    >>> coeff1 = 1
    >>> var1 = pd.Series(['a1', 'a2', 'a3'])
    >>> coeff2 = pd.Series([-0.5, -0.3, -1])
    >>> var2 = pd.Series(['b1', 'b2', 'b3'])
    >>> linexpr((coeff1, var1), (coeff2, var2))
    0    +1.0 a1 -0.5 b1
    1    +1.0 a2 -0.3 b2
    2    +1.0 a3 -1.0 b3
    dtype: object

    With ``as_pandas=False`` only the numpy values are returned. The result
    is typically used as the lhs of :func:`pypsa.linopt.define_constraints`.
    """
    axes, shape = broadcasted_axes(*tuples)
    terms = np.full(shape, '', dtype=object)
    if np.prod(shape):
        for coeff, var in tuples:
            terms = terms + _str_array(coeff) + ' x' + _str_array(var, True) + '\n'
            if isinstance(terms, np.ndarray):
                # blank out entries whose coefficient or variable is
                # missing (NaN) or masked (-1)
                invalid = np.isnan(coeff) | np.isnan(var) | (var == -1)
                terms = np.where(invalid, '', terms)
    if return_axes:
        return (terms, *axes)
    return to_pandas(terms, *axes) if as_pandas else terms
def to_pandas(array, *axes):
    """
    Wrap a numpy array into a pandas.Series (1-d) or pandas.DataFrame (2-d),
    attaching the given index (and columns) if provided.
    """
    if array.ndim == 1:
        return pd.Series(array, *axes)
    return pd.DataFrame(array, *axes)
_to_float_str = lambda f: '%+f'%f
_v_to_float_str = np.vectorize(_to_float_str, otypes=[object])
_to_int_str = lambda d: '%d'%d
_v_to_int_str = np.vectorize(_to_int_str, otypes=[object])
def _str_array(array, integer_string=False):
if isinstance(array, (float, int)):
if integer_string:
return _to_int_str(array)
return _to_float_str(array)
array = np.asarray(array)
if array.dtype.type == np.str_:
array = np.asarray(array, dtype=object)
if array.dtype < str and array.size:
if integer_string:
array = np.nan_to_num(array, False, -1)
return _v_to_int_str(array)
return _v_to_float_str(array)
else:
return array
def join_exprs(df):
    """
    Concatenate all string entries of an array, series or frame into one
    single string.
    """
    flat = np.asarray(df).flatten()
    return ''.join(flat)
# =============================================================================
# references to vars and cons, rewrite this part to not store every reference
# =============================================================================
def _add_reference(ref_dict, df, attr, pnl=True):
if pnl:
if attr in ref_dict.pnl:
ref_dict.pnl[attr][df.columns] = df
else:
ref_dict.pnl[attr] = df
else:
if attr in ref_dict.df:
ref_dict.df = pd.concat([ref_dict.df, df.to_frame(attr)])
else:
ref_dict.df[attr] = df
def set_varref(n, variables, c, attr, spec=''):
    """
    Register variable references on the network.

    One-dimensional references are collected in ``n.vars[c].df``,
    two-dimensional ones in ``n.vars[c].pnl``. For example, nominal
    generator capacities live in ``n.vars.Generator.df.p_nom`` and their
    dispatch in ``n.vars.Generator.pnl.p``.
    """
    if variables.empty:
        return
    pnl = variables.ndim == 2
    if c not in n.variables.index:
        n.vars[c] = Dict(df=pd.DataFrame(), pnl=Dict())
    if ((c, attr) in n.variables.index) and (spec != ''):
        n.variables.at[idx[c, attr], 'specification'] += ', ' + spec
    else:
        n.variables.loc[idx[c, attr], :] = [pnl, spec]
    _add_reference(n.vars[c], variables, attr, pnl=pnl)
def set_conref(n, constraints, c, attr, spec=''):
    """
    Register constraint references on the network.

    One-dimensional references are collected in ``n.cons[c].df``,
    two-dimensional ones in ``n.cons[c].pnl``. For example, constraints on
    nominal generator capacities live in ``n.cons.Generator.df.mu_upper``
    and operational limits in ``n.cons.Generator.pnl.mu_upper``.
    """
    if constraints.empty:
        return
    pnl = constraints.ndim == 2
    if c not in n.constraints.index:
        n.cons[c] = Dict(df=pd.DataFrame(), pnl=Dict())
    if ((c, attr) in n.constraints.index) and (spec != ''):
        n.constraints.at[idx[c, attr], 'specification'] += ', ' + spec
    else:
        n.constraints.loc[idx[c, attr], :] = [pnl, spec]
    _add_reference(n.cons[c], constraints, attr, pnl=pnl)
def get_var(n, c, attr, pop=False):
    """
    Retrieve variable references for a static or time-dependent attribute of
    a component; ``n.variables`` is consulted to decide which of the two it
    is.

    Parameters
    ----------
    n : pypsa.Network
    c : str
        Component name the variable belongs to.
    attr : str
        Attribute name of the variable.
    pop : bool, default False
        Remove the references from the network while returning them.

    Example
    -------
    >>> get_var(n, 'Generator', 'p')
    """
    if n.variables.pnl[c, attr]:
        container = n.vars[c].pnl
    else:
        container = n.vars[c].df
    return container.pop(attr) if pop else container[attr]
def get_con(n, c, attr, pop=False):
    """
    Retrieve constraint references for a static or time-dependent attribute
    of a component; ``n.constraints`` is consulted to decide which of the
    two it is.

    Parameters
    ----------
    n : pypsa.Network
    c : str
        Component name the constraint belongs to.
    attr : str
        Attribute name of the constraint.
    pop : bool, default False
        Remove the references from the network while returning them.

    Example
    -------
    get_con(n, 'Generator', 'mu_upper')
    """
    if n.constraints.pnl[c, attr]:
        container = n.cons[c].pnl
    else:
        container = n.cons[c].df
    return container.pop(attr) if pop else container[attr]
def get_sol(n, name, attr=''):
    """
    Retrieve the solution values for a given variable. A lookup of all
    stored solutions is available at ``n.solutions``.

    Parameters
    ----------
    n : pypsa.Network
    name : str
        General variable name (or component name if the variable is attached
        to a component).
    attr : str
        Attribute name of the variable.

    Example
    -------
    get_sol(n, 'Generator', 'p')
    """
    pnl = n.solutions.at[(name, attr), 'pnl']
    if n.solutions.at[(name, attr), 'in_comp']:
        # component-attached solutions live on the network itself
        return n.pnl(name)[attr] if pnl else n.df(name)[attr + '_opt']
    if pnl:
        return n.sols[name].pnl[attr]
    return n.sols[name].df[attr]
def get_dual(n, name, attr=''):
    """
    Retrieve the shadow price for a given constraint. For custom
    constraints, their name has to be passed to ``keep_references`` in the
    lopf (or ``keep_references=True``). A lookup of all stored shadow prices
    is available at ``n.dualvalues``.

    Parameters
    ----------
    n : pypsa.Network
    name : str
        Constraint name the shadow price belongs to.
    attr : str
        Attribute name of the constraint.

    Example
    -------
    get_dual(n, 'Generator', 'mu_upper')
    """
    pnl = n.dualvalues.at[(name, attr), 'pnl']
    if n.dualvalues.at[(name, attr), 'in_comp']:
        # component-attached duals live on the network itself
        return n.pnl(name)[attr] if pnl else n.df(name)[attr]
    if pnl:
        return n.duals[name].pnl[attr]
    return n.duals[name].df[attr]
# =============================================================================
# solvers
# =============================================================================
def set_int_index(ser):
    """Strip the one-character prefix (e.g. 'x'/'c') from *ser*'s index and
    cast the remainder to integers, in place. Returns *ser*."""
    trimmed = ser.index.str[1:]
    ser.index = trimmed.astype(int)
    return ser
def run_and_read_highs(n, problem_fn, solution_fn, solver_logfile,
                       solver_options={}, warmstart=None, store_basis=True):
    """
    Highs solver function. Reads a linear problem file and passes it to the highs
    solver. If the solution is feasible the function returns the objective,
    solution and dual constraint variables. Highs must be installed for usage.
    Documentation: https://www.maths.ed.ac.uk/hall/HiGHS/
    Installation
    -------------
    The script might only work for version HiGHS 1.1.1. Installation steps::
        sudo apt-get install cmake # if not installed
        git clone git@github.com:ERGO-Code/HiGHS.git
        cd HiGHS
        git checkout 95342daa73543cc21e5b27db3e0fbf7330007541 # moves to HiGHS 1.1.1
        mkdir build
        cd build
        cmake ..
        make
        ctest
    Then in .bashrc add paths of executables and library ::
        export PATH="${PATH}:/foo/HiGHS/build/bin"
        export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/foo/HiGHS/build/lib"
        source .bashrc
    Now when typing ``highs`` in the terminal you should see something like ::
        Running HiGHS 1.1.1 [date: 2021-11-14, git hash: 95342daa]
    Architecture
    -------------
    The function reads and execute (i.e. subprocess.Popen,...) terminal
    commands of the solver. Meaning the command can be also executed at your
    command window/terminal if HiGHs is installed. Executing the commands on
    your local terminal helps to identify the raw outputs that are useful for
    developing the interface further.
    All functions below the "process = ..." do only read and save the outputs
    generated from the HiGHS solver. These parts are solver specific and
    depends on the solver output.
    Solver options
    ---------------
    Solver options are read by the 1) command window and the 2) option_file.txt
    1) An example list of solver options executable by the command window is given here:
    Examples:
    --model_file arg File of model to solve.
    --presolve arg Presolve: "choose" by default - "on"/"off" are alternatives.
    --solver arg Solver: "choose" by default - "simplex"/"ipm" are alternatives.
    --parallel arg Parallel solve: "choose" by default - "on"/"off" are alternatives.
    --time_limit arg Run time limit (double).
    --options_file arg File containing HiGHS options.
    -h, --help Print help.
    2) The options_file.txt gives some more options, see a full list here:
    https://www.maths.ed.ac.uk/hall/HiGHS/HighsOptions.set
    By default, we insert a couple of options for the ipm solver. The dictionary
    can be overwritten by simply giving the new values. For instance, you could
    write a dictionary replacing some of the default values or adding new options:
    ```
    solver_options = {
        name: highs,
        method: ipm,
        parallel: "on",
        <option_name>: <value>,
    }
    ```
    Note, the <option_name> and <value> must be equivalent to the name convention
    of HiGHS. Some function exist that are not documented, check their GitHub file:
    https://github.com/ERGO-Code/HiGHS/blob/master/src/lp_data/HighsOptions.h
    Output
    ------
    status : string,
        "ok" or "warning"
    termination_condition : string,
        Contains "optimal", "infeasible",
    variables_sol : series
    constraints_dual : series
    objective : float
    """
    logger.warning("The HiGHS solver can potentially solve towards variables that slightly deviate from Gurobi,cbc,glpk")
    options_fn = "highs_options.txt"
    # NOTE: `solver_options={}` is a mutable default argument; it is only
    # read (never mutated) here, but callers should not rely on that.
    default_dict = {
        "method": "ipm",
        "primal_feasibility_tolerance": 1e-04,
        "dual_feasibility_tolerance": 1e-05,
        "ipm_optimality_tolerance": 1e-6,
        "presolve": "on",
        "run_crossover": True,
        "parallel": "off",
        "threads": 4,
        "solution_file": solution_fn,
        "write_solution_to_file": True,
        "write_solution_style": 1,
        "log_to_console": True,
    }
    # update default_dict through solver_options and write to file
    default_dict.update(solver_options)
    # "method" is a command-line flag, not an options-file entry; pop it out
    method = default_dict.pop("method", "ipm")
    logger.info(f"Options: \"{default_dict}\". List of options: https://www.maths.ed.ac.uk/hall/HiGHS/HighsOptions.set")
    f1 = open(options_fn, "w")
    f1.write('\n'.join([f"{k} = {v}" for k, v in default_dict.items()]))
    f1.close()
    # write (terminal) commands
    command = f"highs --model_file {problem_fn} "
    if warmstart:
        logger.warning("Warmstart, not available in HiGHS. Will be ignored.")
    command += f"--solver {method} --options_file {options_fn}"
    logger.info(f"Solver command: \"{command}\"")
    # execute command and store command window output
    process = subprocess.Popen(
        command.split(' '),
        stdout=subprocess.PIPE,
        universal_newlines=True
    )
    def read_until_break():
        # Function that reads line by line the command window
        # readline(1) reads character-by-character so the stream is drained
        # even without trailing newlines; stops once the process has exited
        # and the pipe is empty.
        while True:
            out = process.stdout.readline(1)
            if out == '' and process.poll() != None:
                break
            if out != '':
                yield out
    # converts stdout (standard terminal output) to pandas dataframe
    log = io.StringIO(''.join(read_until_break())[:])
    # the HiGHS banner/report is "key: value" per line; parse it as a table
    log = pd.read_csv(log, sep=':', index_col=0, header=None)[1].squeeze()
    if solver_logfile is not None:
        log.to_csv(solver_logfile, sep="\t")
    log.index = log.index.str.strip()
    os.remove(options_fn)
    # read out termination_condition from `info`
    model_status = log["Model status"].strip().lower()
    if "optimal" in model_status:
        status = "ok"
        termination_condition = model_status
    elif "infeasible" in model_status:
        status = "warning"
        termination_condition = model_status
    else:
        status = 'warning'
        termination_condition = model_status
    objective = float(log["Objective value"])
    # read out solution file (.sol)
    f = open(solution_fn, "rb")
    # strip the '** ' markers HiGHS prefixes to some lines before parsing
    trimed_sol_fn = re.sub(rb'\*\*\s+', b'', f.read())
    f.close()
    sol = pd.read_csv(io.BytesIO(trimed_sol_fn), header=[1], sep=r'\s+')
    # the file lists columns (variables) first, then a 'Rows' section
    row_no = sol[sol["Index"] == 'Rows'].index[0]
    sol = sol.drop(row_no+1) # Removes header line after "Rows"
    sol_rows = sol[(sol.index > row_no)]
    sol_cols = sol[(sol.index < row_no)].set_index("Name").pipe(set_int_index)
    variables_sol = pd.to_numeric(sol_cols["Primal"], errors="raise")
    constraints_dual = pd.to_numeric(sol_rows["Dual"], errors="raise").reset_index(drop=True)
    # constraint ids in the LP file are 1-based
    constraints_dual.index += 1
    return (status, termination_condition, variables_sol,
            constraints_dual, objective)
def run_and_read_cbc(n, problem_fn, solution_fn, solver_logfile,
                     solver_options, warmstart=None, store_basis=True):
    """
    Solving function. Reads the linear problem file and passes it to the cbc
    solver. If the solution is successful it returns variable solutions and
    constraint dual values.
    For more information on the solver options, run 'cbc' in your shell

    Parameters
    ----------
    n : pypsa.Network
    problem_fn : str
        Path of the LP problem file.
    solution_fn : str
        Path where cbc writes its solution.
    solver_logfile : str or None
        Path of the solver log file; if None the log goes to stdout.
    solver_options : str or None
        Extra command-line options appended verbatim to the cbc call.
    warmstart : str, optional
        Path of a basis file for warmstarting.
    store_basis : bool, default True
        Store the resulting basis file path in ``n.basis_fn``.

    Returns
    -------
    (status, termination_condition, variables_sol, constraints_dual,
    objective); the last three are None unless the solve was optimal.
    """
    # cbc cannot handle strict inequalities; guard against '>'/'<' in the file
    with open(problem_fn, 'rb') as f:
        # fix: the loop variable used to shadow the builtin `str`
        for line in f.readlines():
            decoded = line.decode('utf-8')
            assert "> " not in decoded, ">, must be changed to >="
            assert "< " not in decoded, "<, must be changed to <="
    #printingOptions is about what goes in solution file
    command = f"cbc -printingOptions all -import {problem_fn} "
    if warmstart:
        command += f'-basisI {warmstart} '
    if (solver_options is not None) and (solver_options != {}):
        command += solver_options
    command += f"-solve -solu {solution_fn} "
    if store_basis:
        n.basis_fn = solution_fn.replace('.sol', '.bas')
        command += f'-basisO {n.basis_fn} '
    if not os.path.exists(solution_fn):
        # fix: create the file portably; os.mknod is unavailable or
        # privileged on some platforms (e.g. macOS)
        open(solution_fn, 'w').close()
    log = open(solver_logfile, 'w') if solver_logfile is not None else subprocess.PIPE
    try:
        result = subprocess.Popen(command.split(' '), stdout=log)
        result.wait()
    finally:
        # fix: the log file handle was previously never closed
        if log is not subprocess.PIPE:
            log.close()
    with open(solution_fn, "r") as f:
        data = f.readline()
    if data.startswith("Optimal - objective value"):
        status = "ok"
        termination_condition = "optimal"
        objective = float(data[len("Optimal - objective value "):])
    elif "Infeasible" in data:
        status = "warning"
        termination_condition = "infeasible"
    else:
        status = 'warning'
        termination_condition = "other"
    if termination_condition != "optimal":
        return status, termination_condition, None, None, None
    # strip cbc's '** ' line markers before parsing the solution table
    with open(solution_fn, "rb") as f:
        trimed_sol_fn = re.sub(rb'\*\*\s+', b'', f.read())
    sol = pd.read_csv(io.BytesIO(trimed_sol_fn), header=None, skiprows=[0],
                      sep=r'\s+', usecols=[1, 2, 3], index_col=0)
    # variables are labelled 'x<N>', constraints 'c<N>'
    variables_b = sol.index.str[0] == 'x'
    variables_sol = sol[variables_b][2].pipe(set_int_index)
    constraints_dual = sol[~variables_b][3].pipe(set_int_index)
    return (status, termination_condition, variables_sol,
            constraints_dual, objective)
def run_and_read_glpk(n, problem_fn, solution_fn, solver_logfile,
                     solver_options, warmstart=None, store_basis=True):
    """
    Solving function. Reads the linear problem file and passes it to the glpk
    solver. If the solution is successful it returns variable solutions and
    constraint dual values.
    For more information on the glpk solver options:
    https://kam.mff.cuni.cz/~elias/glpk.pdf
    """
    # TODO use --nopresol argument for non-optimal solution output
    command = (f"glpsol --lp {problem_fn} --output {solution_fn}")
    if solver_logfile is not None:
        command += f' --log {solver_logfile}'
    if warmstart:
        command += f' --ini {warmstart}'
    if store_basis:
        n.basis_fn = solution_fn.replace('.sol', '.bas')
        command += f' -w {n.basis_fn}'
    if (solver_options is not None) and (solver_options != {}):
        command += solver_options
    result = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
    result.wait()
    # The glpk output file consists of blank-line separated sections:
    # a header, the constraint ("Row") table, and the variable ("Column")
    # table. The same file handle `f` is read section by section below, so
    # the order of the read_until_break calls is significant.
    f = open(solution_fn)
    def read_until_break(f):
        # yield lines up to and including the next blank line
        linebreak = False
        while not linebreak:
            line = f.readline()
            linebreak = line == '\n'
            yield line
    # section 1: header with "key: value" lines (status, objective, ...)
    info = io.StringIO(''.join(read_until_break(f))[:-2])
    info = pd.read_csv(info, sep=':', index_col=0, header=None)[1]
    termination_condition = info.Status.lower().strip()
    # the objective line carries extra text; keep only the numeric part
    objective = float(re.sub(r'[^0-9\.\+\-e]+', '', info.Objective))
    if termination_condition in ["optimal","integer optimal"]:
        status = "ok"
        termination_condition = "optimal"
    elif termination_condition == "undefined":
        status = "warning"
        termination_condition = "infeasible"
    else:
        status = "warning"
    if termination_condition != 'optimal':
        return status, termination_condition, None, None, None
    # section 2: constraint table (fixed-width); 'Marginal' holds the duals
    duals = io.StringIO(''.join(read_until_break(f))[:-2])
    duals = pd.read_fwf(duals)[1:].set_index('Row name')
    if 'Marginal' in duals:
        constraints_dual = pd.to_numeric(duals['Marginal'], 'coerce')\
                           .fillna(0).pipe(set_int_index)
    else:
        logger.warning("Shadow prices of MILP couldn't be parsed")
        constraints_dual = pd.Series(index=duals.index, dtype=float)
    # section 3: variable table; 'Activity' holds the primal solution
    sol = io.StringIO(''.join(read_until_break(f))[:-2])
    variables_sol = (pd.read_fwf(sol)[1:].set_index('Column name')
                     ['Activity'].astype(float).pipe(set_int_index))
    f.close()
    return (status, termination_condition, variables_sol,
            constraints_dual, objective)
def run_and_read_cplex(n, problem_fn, solution_fn, solver_logfile,
                       solver_options, warmstart=None, store_basis=True):
    """
    Solving function. Reads the linear problem file and passes it to the cplex
    solver. If the solution is successful it returns variable solutions and
    constraint dual values. Cplex must be installed for using this function

    Returns a 5-tuple (status, termination_condition, variables_sol,
    constraints_dual, objective).
    """
    if find_spec('cplex') is None:
        raise ModuleNotFoundError("Optional dependency 'cplex' not found."
            "Install via 'conda install -c ibmdecisionoptimization cplex' "
            "or 'pip install cplex'")
    import cplex
    _version = LooseVersion(cplex.__version__)
    m = cplex.Cplex()
    if solver_logfile is not None:
        # cplex >= 12.10 takes a file object for the log stream; older
        # versions accept the path string directly.
        if _version >= "12.10":
            log_file_or_path = open(solver_logfile, "w")
        else:
            log_file_or_path = solver_logfile
        # NOTE(review): only the results log stream is redirected here;
        # warning/error streams keep their defaults -- confirm intended.
        m.set_log_stream(log_file_or_path)
    if solver_options is not None:
        for key, value in solver_options.items():
            # Walk dotted parameter names (e.g. "mip.tolerances.mipgap")
            # down the cplex parameter hierarchy.
            param = m.parameters
            for key_layer in key.split("."):
                param = getattr(param, key_layer)
            param.set(value)
    m.read(problem_fn)
    if warmstart:
        m.start.read_basis(warmstart)
    m.solve()
    is_lp = m.problem_type[m.get_problem_type()] == 'LP'
    if solver_logfile is not None:
        # Close the log handle only if we opened one ourselves above.
        if isinstance(log_file_or_path, io.IOBase):
            log_file_or_path.close()
    termination_condition = m.solution.get_status_string()
    if 'optimal' in termination_condition:
        status = 'ok'
        termination_condition = 'optimal'
    else:
        status = 'warning'
    # A basis can only be stored for successfully solved pure LPs.
    if (status == 'ok') and store_basis and is_lp:
        n.basis_fn = solution_fn.replace('.sol', '.bas')
        try:
            m.solution.basis.write(n.basis_fn)
        except cplex.exceptions.errors.CplexSolverError:
            logger.info('No model basis stored')
            del n.basis_fn
    objective = m.solution.get_objective_value()
    variables_sol = pd.Series(m.solution.get_values(), m.variables.get_names())\
                      .pipe(set_int_index)
    if is_lp:
        constraints_dual = pd.Series(m.solution.get_dual_values(),
                    m.linear_constraints.get_names()).pipe(set_int_index)
    else:
        # MILPs have no meaningful duals; return an empty (NaN) series.
        logger.warning("Shadow prices of MILP couldn't be parsed")
        constraints_dual = pd.Series(index=m.linear_constraints.get_names())\
                             .pipe(set_int_index)
    del m
    return (status, termination_condition, variables_sol, constraints_dual,
            objective)
def run_and_read_gurobi(n, problem_fn, solution_fn, solver_logfile,
                        solver_options, warmstart=None, store_basis=True):
    """
    Solving function. Reads the linear problem file and passes it to the gurobi
    solver. If the solution is successful it returns variable solutions and
    constraint dual values. Gurobipy must be installed for using this function

    For more information on solver options:
    https://www.gurobi.com/documentation/{gurobi_verion}/refman/parameter_descriptions.html
    """
    if find_spec('gurobipy') is None:
        raise ModuleNotFoundError("Optional dependency 'gurobipy' not found. "
                "Install via 'conda install -c gurobi gurobi' or follow the "
                "instructions on the documentation page "
                "https://www.gurobi.com/documentation/")
    import gurobipy
    # Gurobi prints its own log; mute the logging module meanwhile so the
    # output is not doubled.
    logging.disable(50)
    m = gurobipy.read(problem_fn)
    if solver_options is not None:
        for key, value in solver_options.items():
            m.setParam(key, value)
    if solver_logfile is not None:
        m.setParam("logfile", solver_logfile)
    if warmstart:
        m.read(warmstart)
    m.optimize()
    logging.disable(1)

    if store_basis:
        n.basis_fn = solution_fn.replace('.sol', '.bas')
        try:
            m.write(n.basis_fn)
        except gurobipy.GurobiError:
            logger.info('No model basis stored')
            del n.basis_fn

    # Translate gurobi's integer status code into its lowercase name.
    Status = gurobipy.GRB.Status
    statusmap = {}
    for attr in Status.__dir__():
        if attr.startswith('_'):
            continue
        statusmap[getattr(Status, attr)] = attr.lower()
    termination_condition = statusmap[m.status]

    if termination_condition == "inf_or_unbd":
        termination_condition = "infeasible or unbounded"
    status = 'ok' if termination_condition == "optimal" else 'warning'
    if termination_condition not in ["optimal", "suboptimal"]:
        return status, termination_condition, None, None, None

    solution = {v.VarName: v.x for v in m.getVars()}
    variables_sol = pd.Series(solution).pipe(set_int_index)
    try:
        shadow_prices = {c.ConstrName: c.Pi for c in m.getConstrs()}
        constraints_dual = pd.Series(shadow_prices).pipe(set_int_index)
    except AttributeError:
        # MILP solutions expose no Pi attribute on constraints.
        logger.warning("Shadow prices of MILP couldn't be parsed")
        constraints_dual = pd.Series(index=[c.ConstrName for c in m.getConstrs()])
    objective = m.ObjVal
    del m
    return (status, termination_condition, variables_sol,
            constraints_dual, objective)
def run_and_read_xpress(n, problem_fn, solution_fn, solver_logfile,
                        solver_options, keep_files, warmstart=None,
                        store_basis=True):
    """
    Solving function. Reads the linear problem file and passes it to
    the Xpress solver. If the solution is successful it returns
    variable solutions and constraint dual values. The xpress module
    must be installed for using this function.

    For more information on solver options:
    https://www.fico.com/fico-xpress-optimization/docs/latest/solver/GUID-ACD7E60C-7852-36B7-A78A-CED0EA291CDD.html
    """
    import xpress
    m = xpress.problem()
    m.read(problem_fn)
    m.setControl(solver_options)
    if solver_logfile is not None:
        m.setlogfile(solver_logfile)
    if warmstart:
        m.readbasis(warmstart)
    m.solve()
    if store_basis:
        n.basis_fn = solution_fn.replace('.sol', '.bas')
        try:
            m.writebasis(n.basis_fn)
        # Bugfix: was a bare ``except:`` which would also swallow
        # KeyboardInterrupt/SystemExit.
        except Exception:
            logger.info('No model basis stored')
            del n.basis_fn
    termination_condition = m.getProbStatusString()
    if termination_condition in ('mip_optimal', 'lp_optimal'):
        status = 'ok'
        termination_condition = 'optimal'
    elif termination_condition in ('mip_unbounded', 'mip_infeasible',
                                   'lp_unbounded', 'lp_infeasible'):
        # Bugfix: the condition description was previously assigned to
        # ``status``. Keep status in the {ok, warning} vocabulary used by
        # the other run_and_read_* functions and put the description into
        # termination_condition (as run_and_read_gurobi does).
        status = 'warning'
        termination_condition = 'infeasible or unbounded'
    else:
        status = 'warning'
    if termination_condition not in ["optimal"]:
        return status, termination_condition, None, None, None
    var = [str(v) for v in m.getVariable()]
    variables_sol = pd.Series(m.getSolution(var), index=var).pipe(set_int_index)
    # Build the constraint name list outside the try block so ``dual`` is
    # always bound when the except branch below needs it.
    dual = [str(d) for d in m.getConstraint()]
    try:
        constraints_dual = pd.Series(m.getDual(dual), index=dual).pipe(set_int_index)
    except xpress.SolverError:
        logger.warning("Shadow prices of MILP couldn't be parsed")
        constraints_dual = pd.Series(index=dual).pipe(set_int_index)
    objective = m.getObjVal()
    del m
    return (status, termination_condition, variables_sol,
            constraints_dual, objective)
| [
"cplex.Cplex",
"os.remove",
"numpy.nan_to_num",
"pandas.read_csv",
"numpy.isnan",
"numpy.arange",
"gurobipy.read",
"xpress.problem",
"numpy.prod",
"pandas.DataFrame",
"os.path.exists",
"re.sub",
"io.BytesIO",
"os.mknod",
"numpy.vectorize",
"distutils.version.LooseVersion",
"numpy.asa... | [((1209, 1236), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1226, 1236), False, 'import logging, re, io, subprocess\n'), ((16209, 16253), 'numpy.vectorize', 'np.vectorize', (['_to_float_str'], {'otypes': '[object]'}), '(_to_float_str, otypes=[object])\n', (16221, 16253), True, 'import numpy as np\n'), ((16302, 16344), 'numpy.vectorize', 'np.vectorize', (['_to_int_str'], {'otypes': '[object]'}), '(_to_int_str, otypes=[object])\n', (16314, 16344), True, 'import numpy as np\n'), ((8866, 8880), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (8873, 8880), True, 'import numpy as np\n'), ((15465, 15479), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (15472, 15479), True, 'import numpy as np\n'), ((16544, 16561), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (16554, 16561), True, 'import numpy as np\n'), ((27858, 27879), 'os.remove', 'os.remove', (['options_fn'], {}), '(options_fn)\n', (27867, 27879), False, 'import os\n'), ((28798, 28847), 'pandas.to_numeric', 'pd.to_numeric', (["sol_cols['Primal']"], {'errors': '"""raise"""'}), "(sol_cols['Primal'], errors='raise')\n", (28811, 28847), True, 'import pandas as pd\n'), ((34754, 34785), 'distutils.version.LooseVersion', 'LooseVersion', (['cplex.__version__'], {}), '(cplex.__version__)\n', (34766, 34785), False, 'from distutils.version import LooseVersion\n'), ((34794, 34807), 'cplex.Cplex', 'cplex.Cplex', ([], {}), '()\n', (34805, 34807), False, 'import cplex\n'), ((37633, 37652), 'logging.disable', 'logging.disable', (['(50)'], {}), '(50)\n', (37648, 37652), False, 'import logging, re, io, subprocess\n'), ((37662, 37687), 'gurobipy.read', 'gurobipy.read', (['problem_fn'], {}), '(problem_fn)\n', (37675, 37687), False, 'import gurobipy\n'), ((37955, 37973), 'logging.disable', 'logging.disable', (['(1)'], {}), '(1)\n', (37970, 37973), False, 'import logging, re, io, subprocess\n'), ((40170, 40186), 'xpress.problem', 'xpress.problem', ([], {}), 
'()\n', (40184, 40186), False, 'import xpress\n'), ((9353, 9375), 'pandas.Series', 'pd.Series', ([], {'dtype': 'float'}), '(dtype=float)\n', (9362, 9375), True, 'import pandas as pd\n'), ((9652, 9677), 'numpy.where', 'np.where', (['mask', 'exprs', '""""""'], {}), "(mask, exprs, '')\n", (9660, 9677), True, 'import numpy as np\n'), ((9698, 9727), 'numpy.where', 'np.where', (['mask', 'variables', '(-1)'], {}), '(mask, variables, -1)\n', (9706, 9727), True, 'import numpy as np\n'), ((10241, 10252), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (10250, 10252), True, 'import pandas as pd\n'), ((10624, 10649), 'numpy.where', 'np.where', (['mask', 'exprs', '""""""'], {}), "(mask, exprs, '')\n", (10632, 10649), True, 'import numpy as np\n'), ((10665, 10689), 'numpy.where', 'np.where', (['mask', 'cons', '(-1)'], {}), '(mask, cons, -1)\n', (10673, 10689), True, 'import numpy as np\n'), ((11304, 11329), 'numpy.where', 'np.where', (['mask', 'exprs', '""""""'], {}), "(mask, exprs, '')\n", (11312, 11329), True, 'import numpy as np\n'), ((11350, 11379), 'numpy.where', 'np.where', (['mask', 'variables', '(-1)'], {}), '(mask, variables, -1)\n', (11358, 11379), True, 'import numpy as np\n'), ((16081, 16104), 'pandas.Series', 'pd.Series', (['array', '*axes'], {}), '(array, *axes)\n', (16090, 16104), True, 'import pandas as pd\n'), ((16129, 16155), 'pandas.DataFrame', 'pd.DataFrame', (['array', '*axes'], {}), '(array, *axes)\n', (16141, 16155), True, 'import pandas as pd\n'), ((16614, 16645), 'numpy.asarray', 'np.asarray', (['array'], {'dtype': 'object'}), '(array, dtype=object)\n', (16624, 16645), True, 'import numpy as np\n'), ((28492, 28517), 'io.BytesIO', 'io.BytesIO', (['trimed_sol_fn'], {}), '(trimed_sol_fn)\n', (28502, 28517), False, 'import logging, re, io, subprocess\n'), ((30239, 30266), 'os.path.exists', 'os.path.exists', (['solution_fn'], {}), '(solution_fn)\n', (30253, 30266), False, 'import os\n'), ((30276, 30297), 'os.mknod', 'os.mknod', (['solution_fn'], {}), 
'(solution_fn)\n', (30284, 30297), False, 'import os\n'), ((31131, 31156), 'io.BytesIO', 'io.BytesIO', (['trimed_sol_fn'], {}), '(trimed_sol_fn)\n', (31141, 31156), False, 'import logging, re, io, subprocess\n'), ((32804, 32856), 'pandas.read_csv', 'pd.read_csv', (['info'], {'sep': '""":"""', 'index_col': '(0)', 'header': 'None'}), "(info, sep=':', index_col=0, header=None)\n", (32815, 32856), True, 'import pandas as pd\n'), ((32939, 32986), 're.sub', 're.sub', (['"""[^0-9\\\\.\\\\+\\\\-e]+"""', '""""""', 'info.Objective'], {}), "('[^0-9\\\\.\\\\+\\\\-e]+', '', info.Objective)\n", (32945, 32986), False, 'import logging, re, io, subprocess\n'), ((33758, 33799), 'pandas.Series', 'pd.Series', ([], {'index': 'duals.index', 'dtype': 'float'}), '(index=duals.index, dtype=float)\n', (33767, 33799), True, 'import pandas as pd\n'), ((34507, 34525), 'importlib.util.find_spec', 'find_spec', (['"""cplex"""'], {}), "('cplex')\n", (34516, 34525), False, 'from importlib.util import find_spec\n'), ((37245, 37266), 'importlib.util.find_spec', 'find_spec', (['"""gurobipy"""'], {}), "('gurobipy')\n", (37254, 37266), False, 'from importlib.util import find_spec\n'), ((9416, 9458), 'numpy.arange', 'np.arange', (['(n._xCounter - size)', 'n._xCounter'], {}), '(n._xCounter - size, n._xCounter)\n', (9425, 9458), True, 'import numpy as np\n'), ((10288, 10330), 'numpy.arange', 'np.arange', (['(n._cCounter - size)', 'n._cCounter'], {}), '(n._cCounter - size, n._cCounter)\n', (10297, 10330), True, 'import numpy as np\n'), ((11152, 11194), 'numpy.arange', 'np.arange', (['(n._xCounter - size)', 'n._xCounter'], {}), '(n._xCounter - size, n._xCounter)\n', (11161, 11194), True, 'import numpy as np\n'), ((16734, 16765), 'numpy.nan_to_num', 'np.nan_to_num', (['array', '(False)', '(-1)'], {}), '(array, False, -1)\n', (16747, 16765), True, 'import numpy as np\n'), ((28871, 28918), 'pandas.to_numeric', 'pd.to_numeric', (["sol_rows['Dual']"], {'errors': '"""raise"""'}), "(sol_rows['Dual'], 
errors='raise')\n", (28884, 28918), True, 'import pandas as pd\n'), ((12630, 12644), 'numpy.asarray', 'np.asarray', (['df'], {}), '(df)\n', (12640, 12644), True, 'import numpy as np\n'), ((15734, 15758), 'numpy.where', 'np.where', (['isna', '""""""', 'expr'], {}), "(isna, '', expr)\n", (15742, 15758), True, 'import numpy as np\n'), ((17006, 17020), 'numpy.asarray', 'np.asarray', (['df'], {}), '(df)\n', (17016, 17020), True, 'import numpy as np\n'), ((27671, 27722), 'pandas.read_csv', 'pd.read_csv', (['log'], {'sep': '""":"""', 'index_col': '(0)', 'header': 'None'}), "(log, sep=':', index_col=0, header=None)\n", (27682, 27722), True, 'import pandas as pd\n'), ((33449, 33467), 'pandas.read_fwf', 'pd.read_fwf', (['duals'], {}), '(duals)\n', (33460, 33467), True, 'import pandas as pd\n'), ((18196, 18210), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (18208, 18210), True, 'import pandas as pd\n'), ((19097, 19111), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (19109, 19111), True, 'import pandas as pd\n'), ((41624, 41645), 'pandas.Series', 'pd.Series', ([], {'index': 'dual'}), '(index=dual)\n', (41633, 41645), True, 'import pandas as pd\n'), ((15412, 15426), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (15419, 15426), True, 'import numpy as np\n'), ((15665, 15680), 'numpy.isnan', 'np.isnan', (['coeff'], {}), '(coeff)\n', (15673, 15680), True, 'import numpy as np\n'), ((15683, 15696), 'numpy.isnan', 'np.isnan', (['var'], {}), '(var)\n', (15691, 15696), True, 'import numpy as np\n'), ((33549, 33591), 'pandas.to_numeric', 'pd.to_numeric', (["duals['Marginal']", '"""coerce"""'], {}), "(duals['Marginal'], 'coerce')\n", (33562, 33591), True, 'import pandas as pd\n'), ((33879, 33895), 'pandas.read_fwf', 'pd.read_fwf', (['sol'], {}), '(sol)\n', (33890, 33895), True, 'import pandas as pd\n')] |
import cv2
import numpy as np
from utils import Util
import pyttsx3 as p
engine = p.init()
class Lane:
    """Lane-line detector for road images and videos.

    Edge-detects each frame, masks it to a trapezoidal region of interest,
    extracts Hough line segments (via ``utils.Util``) and fits straight and
    polynomial lane models. A steering hint ("Turn Left" / "Turn Right" /
    "Go straight") is written to ``write.txt``.
    """

    def __init__(self, path):
        # Source path (image or video); stored for the caller's convenience.
        self.path = path
        self.util = Util()

    def run_img(self, path):
        """Load a single image, resize it to 800x600 and display it."""
        img = cv2.imread(path)
        img = cv2.resize(img, (800, 600))
        #self.detect(img)
        cv2.imshow('Frame', img)

    def run(self, path):
        """Play a video, running lane detection on every frame.

        Press 'q' to stop early. Frames are also written to output.mp4.
        """
        cap = cv2.VideoCapture(path)
        # NOTE(review): frames are resized to 800x600 below but the writer is
        # opened at 640x480 -- confirm the output video is usable.
        out = cv2.VideoWriter('output.mp4', 0x7634706d, 20.0, (640, 480))
        if not cap.isOpened():
            print("Error opening video stream or file")
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                # Display the resulting frame
                frame = cv2.resize(frame, (800, 600))
                out.write(frame)
                self.detect(frame)
                cv2.imshow('Frame', frame)
                # Press Q on keyboard to exit
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            else:
                break
        cap.release()
        out.release()
        cv2.destroyAllWindows()

    def detect(self, screen):
        """Detect lane lines in one frame, drawing them onto ``screen``.

        Returns the annotated frame. Detection is best-effort: frames with
        no usable lane segments leave the frame unchanged.
        """
        # Trapezoidal region of interest covering the road ahead.
        vert = np.array(
            [[100, 550], [375, 350], [450, 350], [800, 550]], np.int32)
        fin = self.util.edgeDetect(screen)
        fin = self.util.roi(fin, [vert])
        line = cv2.HoughLinesP(fin, 2, np.pi/180, 20, 7, 7)
        if line is not None:
            for i in line:
                cv2.line(screen, (i[0][0], i[0][1]),
                         (i[0][2], i[0][3]), (255, 0, 0), 10)
        l1dataset = []
        l2dataset = []
        try:
            straightxcors, straightycors = self.util.averageLanes(line)
            xcors, ycors = self.util.getPoints(line)
            l1dataset.append(straightxcors[0])
            l1dataset.append(straightycors[0])
            l2dataset.append(straightxcors[1])
            # Bugfix: this previously appended straightxcors[1] a second
            # time, regressing lane 2 against its own x-values.
            l2dataset.append(straightycors[1])
            allstraightxcors = straightxcors[0] + straightxcors[1]
            allstraightycors = straightycors[0] + straightycors[1]
            l1m, l1b = self.util.linearRegression(l1dataset[0], l1dataset[1])
            l2m, l2b = self.util.linearRegression(l2dataset[0], l2dataset[1])
            allm, allb = self.util.linearRegression(
                allstraightxcors, allstraightycors)
            # NOTE(review): allxcor1 is evaluated at 350 but drawn at y=600
            # below (and allxcor2 = intercept at y=0 is drawn at y=350) --
            # verify the intended pairing.
            allxcor1 = int((allm * 350) + allb)
            allxcor2 = int(allb)
            filterl1x = []
            filterl1y = []
            filterl2x = []
            filterl2y = []
            # Split the raw Hough points between the two lanes using lane
            # 2's straight-line fit as the separator.
            for count, i in enumerate(ycors):
                if (i*l2m + l2b < xcors[count]):
                    filterl2x.append(xcors[count])
                    filterl2y.append(i)
                else:
                    filterl1x.append(xcors[count])
                    filterl1y.append(i)
            # Straight-lane endpoints: x = (y - b) / m at y=600 and y=350.
            l1inx1 = int((600 - l1b) / l1m)
            l1inx2 = int((350 - l1b) / l1m)
            l2inx1 = int((600 - l2b) / l2m)
            l2inx2 = int((350 - l2b) / l2m)
            cv2.line(screen, (int(l1inx1), 600),
                     (int(l1inx2), 350), (0, 0, 0), 10)
            cv2.line(screen, (int(l2inx1), 600),
                     (int(l2inx2), 350), (0, 0, 0), 10)
            cv2.line(screen, (allxcor1, 600), (allxcor2, 350), (255, 0, 0), 10)
            # Steering hint based on where the two lane fits intersect.
            results = self.util.intersection([l1m, l1b], [l2m, l2b])
            if results is not None:
                if results[0] > 400:
                    with open("write.txt", "w") as f:
                        f.write("Turn Left")
                else:
                    with open("write.txt", "w") as f:
                        f.write("Turn Right")
            else:
                with open("write.txt", "w") as f:
                    f.write("Go straight")
            # Polynomial fits drawn as chained segments between sample points.
            try:
                equ1, polyx1, polyy1 = self.util.polyReg(filterl2x, filterl2y)
                for i in range(1, len(polyx1)):
                    cv2.line(screen, (int(polyx1[i]), int(polyy1[i])), (int(
                        polyx1[i-1]), int(polyy1[i-1])), (255, 255, 0), 10)
            except Exception as e:
                print(e)
            try:
                equ2, polyx2, polyy2 = self.util.polyReg(filterl1x, filterl1y)
                for i in range(1, len(polyx2)):
                    cv2.line(screen, (int(polyx2[i]), int(polyy2[i])), (int(
                        polyx2[i-1]), int(polyy2[i-1])), (255, 255, 0), 10)
            # Was a bare ``except:``; restricted to ordinary exceptions.
            except Exception:
                pass
        except Exception:
            # Best-effort: frames without detectable lane segments raise
            # inside averageLanes / the regressions; skip annotation then.
            pass
        return screen
| [
"cv2.line",
"pyttsx3.init",
"cv2.waitKey",
"cv2.destroyAllWindows",
"utils.Util",
"cv2.VideoCapture",
"cv2.imread",
"numpy.array",
"cv2.VideoWriter",
"cv2.HoughLinesP",
"cv2.imshow",
"cv2.resize"
] | [((84, 92), 'pyttsx3.init', 'p.init', ([], {}), '()\n', (90, 92), True, 'import pyttsx3 as p\n'), ((182, 188), 'utils.Util', 'Util', ([], {}), '()\n', (186, 188), False, 'from utils import Util\n'), ((237, 253), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (247, 253), False, 'import cv2\n'), ((268, 295), 'cv2.resize', 'cv2.resize', (['img', '(800, 600)'], {}), '(img, (800, 600))\n', (278, 295), False, 'import cv2\n'), ((330, 354), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'img'], {}), "('Frame', img)\n", (340, 354), False, 'import cv2\n'), ((396, 418), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (412, 418), False, 'import cv2\n'), ((433, 492), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output.mp4"""', '(1983148141)', '(20.0)', '(640, 480)'], {}), "('output.mp4', 1983148141, 20.0, (640, 480))\n", (448, 492), False, 'import cv2\n'), ((1117, 1140), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1138, 1140), False, 'import cv2\n'), ((1187, 1255), 'numpy.array', 'np.array', (['[[100, 550], [375, 350], [450, 350], [800, 550]]', 'np.int32'], {}), '([[100, 550], [375, 350], [450, 350], [800, 550]], np.int32)\n', (1195, 1255), True, 'import numpy as np\n'), ((1368, 1414), 'cv2.HoughLinesP', 'cv2.HoughLinesP', (['fin', '(2)', '(np.pi / 180)', '(20)', '(7)', '(7)'], {}), '(fin, 2, np.pi / 180, 20, 7, 7)\n', (1383, 1414), False, 'import cv2\n'), ((3241, 3308), 'cv2.line', 'cv2.line', (['screen', '(allxcor1, 600)', '(allxcor2, 350)', '(255, 0, 0)', '(10)'], {}), '(screen, (allxcor1, 600), (allxcor2, 350), (255, 0, 0), 10)\n', (3249, 3308), False, 'import cv2\n'), ((756, 785), 'cv2.resize', 'cv2.resize', (['frame', '(800, 600)'], {}), '(frame, (800, 600))\n', (766, 785), False, 'import cv2\n'), ((870, 896), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', frame)\n", (880, 896), False, 'import cv2\n'), ((1486, 1559), 'cv2.line', 'cv2.line', (['screen', '(i[0][0], i[0][1])', '(i[0][2], 
i[0][3])', '(255, 0, 0)', '(10)'], {}), '(screen, (i[0][0], i[0][1]), (i[0][2], i[0][3]), (255, 0, 0), 10)\n', (1494, 1559), False, 'import cv2\n'), ((963, 978), 'cv2.waitKey', 'cv2.waitKey', (['(25)'], {}), '(25)\n', (974, 978), False, 'import cv2\n')] |
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import distributions
import rewards
import test_util
from environments import lending
from environments import lending_params
from metrics import lending_metrics
import numpy as np
class CreditDistributionTest(absltest.TestCase):
  """Tests for the lending_metrics.CreditDistribution metric."""

  def test_final_credit_distribution_metric_can_interact_with_lending(self):
    """Smoke test: the metric runs inside a full lending simulation."""
    env = lending.DelayedImpactEnv()
    env.set_scalar_reward(rewards.NullReward())
    # Use step=-1 to get the final credit distribution.
    final_distribution = lending_metrics.CreditDistribution(env, step=-1)
    initial_distribution = lending_metrics.CreditDistribution(env, step=0)
    test_util.run_test_simulation(
        env=env, metric=[final_distribution, initial_distribution])

  def test_measure_distribution_change_measurement(self):
    """Giving a loan changes the measured credit distribution."""
    # The lower cluster has a 100% success rate and the upper cluster has a 0%
    # success rate. This causes applicants to move constantly between clusters.
    clusters = distributions.Mixture(
        components=[
            lending_params._credit_cluster_builder(
                group_membership=[1, 0],
                cluster_probs=[0.1, 0.9],
                success_probs=[1., 0.])(),
            lending_params._credit_cluster_builder(
                group_membership=[0, 1],
                cluster_probs=[0.8, 0.2],
                success_probs=[1., 0.])(),
        ],
        weights=(0.5, 0.5))
    env = lending.DelayedImpactEnv(
        lending_params.DelayedImpactParams(applicant_distribution=clusters))
    initial_distribution = lending_metrics.CreditDistribution(env, 0)
    final_distribution = lending_metrics.CreditDistribution(env, -1)
    # Giving a loan should change the distribution.
    env.step(np.asarray(1))
    # Take another step to move current state into history. This step does not
    # change the distribution because the loan is rejected.
    env.step(np.asarray(0))
    # The initial measurement reproduces the configured cluster_probs per
    # group; the final one must differ after the accepted loan.
    self.assertEqual({
        '0': [0.1, 0.9],
        '1': [0.8, 0.2]
    }, initial_distribution.measure(env))
    self.assertNotEqual({
        '0': [0.1, 0.9],
        '1': [0.8, 0.2]
    }, final_distribution.measure(env))
class CumulativeLoansTest(absltest.TestCase):
  """Tests for the lending_metrics.CumulativeLoans metric."""

  def test_cumulative_count(self):
    """Accepting every applicant accumulates one loan per step."""
    env = lending.DelayedImpactEnv()
    metric = lending_metrics.CumulativeLoans(env)

    env.seed(100)
    _ = env.reset()
    num_steps = 10
    for _ in range(num_steps):
      env.step(np.asarray(1))

    measurement = metric.measure(env)
    self.assertEqual(measurement.shape, (2, num_steps))
    # After the first step exactly one loan exists across both groups.
    self.assertEqual(measurement[:, 0].sum(), 1)
    # After the last step all ten loans are accounted for.
    self.assertEqual(measurement[:, -1].sum(), num_steps)

  def test_no_loans_to_group_zero(self):
    """Rejecting group 0 keeps its cumulative loan count at zero."""
    env = lending.DelayedImpactEnv()
    metric = lending_metrics.CumulativeLoans(env)

    env.seed(100)
    obs = env.reset()
    for _ in range(10):
      # Accept (action 1) exactly when the applicant is from group 1.
      decision = np.argmax(obs['group'])
      obs, _, _, _ = env.step(decision)

    measurement = metric.measure(env)
    self.assertEqual(measurement.shape, (2, 10))
    # Group 0 never receives a loan.
    self.assertEqual(measurement[0, -1], 0)
    # Group 1 receives at least one.
    self.assertGreater(measurement[1, -1], 0)
# Run the absl test runner when executed as a script.
if __name__ == '__main__':
  absltest.main()
| [
"absl.testing.absltest.main",
"environments.lending.DelayedImpactEnv",
"rewards.NullReward",
"numpy.argmax",
"metrics.lending_metrics.CreditDistribution",
"numpy.asarray",
"environments.lending_params._credit_cluster_builder",
"metrics.lending_metrics.CumulativeLoans",
"environments.lending_params.D... | [((4025, 4040), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4038, 4040), False, 'from absl.testing import absltest\n'), ((1072, 1098), 'environments.lending.DelayedImpactEnv', 'lending.DelayedImpactEnv', ([], {}), '()\n', (1096, 1098), False, 'from environments import lending\n'), ((1228, 1276), 'metrics.lending_metrics.CreditDistribution', 'lending_metrics.CreditDistribution', (['env'], {'step': '(-1)'}), '(env, step=-1)\n', (1262, 1276), False, 'from metrics import lending_metrics\n'), ((1304, 1351), 'metrics.lending_metrics.CreditDistribution', 'lending_metrics.CreditDistribution', (['env'], {'step': '(0)'}), '(env, step=0)\n', (1338, 1351), False, 'from metrics import lending_metrics\n'), ((1356, 1449), 'test_util.run_test_simulation', 'test_util.run_test_simulation', ([], {'env': 'env', 'metric': '[final_distribution, initial_distribution]'}), '(env=env, metric=[final_distribution,\n initial_distribution])\n', (1385, 1449), False, 'import test_util\n'), ((2269, 2311), 'metrics.lending_metrics.CreditDistribution', 'lending_metrics.CreditDistribution', (['env', '(0)'], {}), '(env, 0)\n', (2303, 2311), False, 'from metrics import lending_metrics\n'), ((2337, 2380), 'metrics.lending_metrics.CreditDistribution', 'lending_metrics.CreditDistribution', (['env', '(-1)'], {}), '(env, -1)\n', (2371, 2380), False, 'from metrics import lending_metrics\n'), ((2953, 2979), 'environments.lending.DelayedImpactEnv', 'lending.DelayedImpactEnv', ([], {}), '()\n', (2977, 2979), False, 'from environments import lending\n'), ((2993, 3029), 'metrics.lending_metrics.CumulativeLoans', 'lending_metrics.CumulativeLoans', (['env'], {}), '(env)\n', (3024, 3029), False, 'from metrics import lending_metrics\n'), ((3499, 3525), 'environments.lending.DelayedImpactEnv', 'lending.DelayedImpactEnv', ([], {}), '()\n', (3523, 3525), False, 'from environments import lending\n'), ((3539, 3575), 'metrics.lending_metrics.CumulativeLoans', 
'lending_metrics.CumulativeLoans', (['env'], {}), '(env)\n', (3570, 3575), False, 'from metrics import lending_metrics\n'), ((1125, 1145), 'rewards.NullReward', 'rewards.NullReward', ([], {}), '()\n', (1143, 1145), False, 'import rewards\n'), ((2173, 2240), 'environments.lending_params.DelayedImpactParams', 'lending_params.DelayedImpactParams', ([], {'applicant_distribution': 'clusters'}), '(applicant_distribution=clusters)\n', (2207, 2240), False, 'from environments import lending_params\n'), ((2447, 2460), 'numpy.asarray', 'np.asarray', (['(1)'], {}), '(1)\n', (2457, 2460), True, 'import numpy as np\n'), ((2614, 2627), 'numpy.asarray', 'np.asarray', (['(0)'], {}), '(0)\n', (2624, 2627), True, 'import numpy as np\n'), ((3707, 3730), 'numpy.argmax', 'np.argmax', (["obs['group']"], {}), "(obs['group'])\n", (3716, 3730), True, 'import numpy as np\n'), ((3108, 3121), 'numpy.asarray', 'np.asarray', (['(1)'], {}), '(1)\n', (3118, 3121), True, 'import numpy as np\n'), ((1745, 1864), 'environments.lending_params._credit_cluster_builder', 'lending_params._credit_cluster_builder', ([], {'group_membership': '[1, 0]', 'cluster_probs': '[0.1, 0.9]', 'success_probs': '[1.0, 0.0]'}), '(group_membership=[1, 0],\n cluster_probs=[0.1, 0.9], success_probs=[1.0, 0.0])\n', (1783, 1864), False, 'from environments import lending_params\n'), ((1923, 2042), 'environments.lending_params._credit_cluster_builder', 'lending_params._credit_cluster_builder', ([], {'group_membership': '[0, 1]', 'cluster_probs': '[0.8, 0.2]', 'success_probs': '[1.0, 0.0]'}), '(group_membership=[0, 1],\n cluster_probs=[0.8, 0.2], success_probs=[1.0, 0.0])\n', (1961, 2042), False, 'from environments import lending_params\n')] |
import numpy as np
#import h5py
from keras.models import Sequential
from keras.layers import LSTM, Dense
if __name__ == '__main__':
    print('why is this so hard')

    # Training pair: input 0..99, target 1..100 -- the net learns a +1 shift.
    data = np.arange(100, dtype=float).reshape(1, 1, 100)
    target = np.arange(1, 101, dtype=float).reshape(1, 1, 100)

    # Validation pair covering the following hundred integers.
    x_test = np.arange(100, 200).reshape(1, 1, 100)
    y_test = np.arange(101, 201).reshape(1, 1, 100)

    # One LSTM layer followed by a dense readout over the 100 features.
    model = Sequential()
    model.add(LSTM(100, input_shape=(1, 100), return_sequences=True,
                   activation='sigmoid'))
    model.add(Dense(100))
    # add nesterov somewhere here?
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    model.fit(data, target, epochs=500, batch_size=1, verbose=2,
              validation_data=(x_test, y_test))
    # model.save_weights("/Users/rambo/testweights")
    predict = model.predict(data)
    weights = model.get_weights()
| [
"keras.models.Sequential",
"keras.layers.Dense",
"numpy.array",
"keras.layers.LSTM"
] | [((582, 594), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (592, 594), False, 'from keras.models import Sequential\n'), ((606, 682), 'keras.layers.LSTM', 'LSTM', (['(100)'], {'input_shape': '(1, 100)', 'return_sequences': '(True)', 'activation': '"""sigmoid"""'}), "(100, input_shape=(1, 100), return_sequences=True, activation='sigmoid')\n", (610, 682), False, 'from keras.layers import LSTM, Dense\n'), ((692, 702), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (697, 702), False, 'from keras.layers import LSTM, Dense\n'), ((242, 269), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (250, 269), True, 'import numpy as np\n'), ((336, 365), 'numpy.array', 'np.array', (['target'], {'dtype': 'float'}), '(target, dtype=float)\n', (344, 365), True, 'import numpy as np\n'), ((449, 465), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (457, 465), True, 'import numpy as np\n'), ((529, 545), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (537, 545), True, 'import numpy as np\n')] |
"""Plotting module for elements.
This modules provides functions to plot the elements statistic data.
"""
import numpy as np
from bokeh.layouts import gridplot
from bokeh.plotting import figure
from scipy.stats import gaussian_kde
def plot_histogram(attribute_dict, label={}, var_list=[], **kwargs):
"""Plot histogram and the PDF.
This function creates a histogram to display the random variable
distribution.
Parameters
----------
attribute_dict : dict
Dictionary with element parameters.
label : dict
Dictionary with labels for each element parameter. Labels are displayed
on bokeh figure.
var_list : list, optional
List of random variables, in string format, to plot.
**kwargs : optional
Additional key word arguments can be passed to change
the numpy.histogram (e.g. density=True, bins=11, ...)
Returns
-------
grid_plot : bokeh row
A row with the histogram plots.
"""
default_values = dict(density=True, bins=21)
for k, v in default_values.items():
kwargs.setdefault(k, v)
figures = []
for var in var_list:
hist, edges = np.histogram(attribute_dict[var], **kwargs)
if kwargs["density"] is True:
y_label = "PDF"
else:
y_label = "Frequency"
fig = figure(
width=640,
height=480,
title="Histogram - {}".format(var),
x_axis_label="{}".format(label[var]),
y_axis_label="{}".format(y_label),
)
fig.xaxis.axis_label_text_font_size = "14pt"
fig.yaxis.axis_label_text_font_size = "14pt"
fig.axis.major_label_text_font_size = "14pt"
fig.title.text_font_size = "14pt"
fig.quad(
top=hist,
bottom=0,
left=edges[:-1],
right=edges[1:],
fill_color="red",
line_color="white",
alpha=1.0,
)
if y_label == "PDF":
x = np.linspace(
min(attribute_dict[var]),
max(attribute_dict[var]),
len(attribute_dict[var]),
)
kernel = gaussian_kde(attribute_dict[var])
fig.line(
x,
kernel(x),
line_alpha=1.0,
line_width=3.0,
line_color="royalblue",
legend_label="pdf estimation",
)
figures.append(fig)
grid_plot = gridplot([figures], toolbar_location="right")
return grid_plot
| [
"numpy.histogram",
"scipy.stats.gaussian_kde",
"bokeh.layouts.gridplot"
] | [((2506, 2551), 'bokeh.layouts.gridplot', 'gridplot', (['[figures]'], {'toolbar_location': '"""right"""'}), "([figures], toolbar_location='right')\n", (2514, 2551), False, 'from bokeh.layouts import gridplot\n'), ((1174, 1217), 'numpy.histogram', 'np.histogram', (['attribute_dict[var]'], {}), '(attribute_dict[var], **kwargs)\n', (1186, 1217), True, 'import numpy as np\n'), ((2193, 2226), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['attribute_dict[var]'], {}), '(attribute_dict[var])\n', (2205, 2226), False, 'from scipy.stats import gaussian_kde\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake
from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32
import pytest
@tvm.testing.requires_llvm
@pytest.mark.skip("skip because feature not enabled")
def test_fc_int8_acc32():
m = 1024
n = 1024
k = 1024
X = te.placeholder((m, k), name='X', dtype="uint8")
W = te.placeholder((n, k), name='W', dtype="int8")
peak = 280
print("Peak {} Gops/s".format(peak))
memory_ops = m * k + n * k + 2 * m * n
gops_per_mm = 2 * m * n * k
# For LLVM < 8.0, it shows "'cascadelake' is not a recognized processor for this target
# (ignoring processor)" error with the following setting. After LLVM 8.0 is enabled in the
# test, we should use cascadelake setting.
def verify(target="llvm -mcpu=cascadelake"):
if not tvm.testing.device_enabled(target):
print("skip because %s is not enabled..." % target)
return
ctx = tvm.context(target, 0)
pc = dot_16x1x16_uint8_int8_int32_cascadelake()
ak = te.reduce_axis((0, k), name='k')
packedW = te.placeholder(
(n // 16, 16 * (k // 4), 4), name='packedW', dtype="int8")
t_fc = te.compute((m, n), lambda i, j: te.sum(X[i, ak].astype(
"int32") * packedW[j / 16, (ak / 4) * 16 + j % 16, ak % 4].astype("int32"), axis=ak), name="F")
t_sch = te.create_schedule(t_fc.op)
a_x, a_y = t_fc.op.axis
a_k, = t_fc.op.reduce_axis
a_yo, a_yi = t_sch[t_fc].split(a_y, factor=16)
a_xo, a_xi = t_sch[t_fc].split(a_x, factor=32)
a_ko, a_ki = t_sch[t_fc].split(a_k, factor=4)
a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=4)
t_sch[t_fc].reorder(a_yo, a_xo, a_xi, a_koo, a_koi, a_yi, a_ki)
t_sch[t_fc].unroll(a_koi)
t_sch[t_fc].tensorize(a_yi, pc)
t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name="intrinsic")
t_evaluator = t_func.time_evaluator(t_func.entry_name, ctx, number=10)
# generate the plain data
a_ = np.random.uniform(1, 10, size=(m, k)).astype("uint8")
b_ = np.random.uniform(1, 10, size=(n, k)).astype("int8")
packW = np.random.uniform(1, 10, size=(
n // 16, 16 * (k // 4), 4)).astype("int8")
# This occurs in pre_compute stage
for r_idx in range(n // 16):
for s_idx in range(16 * (k // 4)):
for t_idx in range(4):
packW[r_idx][s_idx][t_idx] = b_[r_idx * 16 + s_idx %
16][(s_idx // 16) * 4 + t_idx]
x = tvm.nd.array(a_, ctx)
w = tvm.nd.array(packW, ctx)
y = tvm.nd.array(np.zeros((m, n), dtype="int32"), ctx)
result = t_evaluator(x, w, y)
gops_per_sec = gops_per_mm / result.mean / 1e9
# verify the correctness
tvm.testing.assert_allclose(y.asnumpy(), np.dot(a_, b_.T), rtol=0)
print('Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}'.format(
result.mean * 1000, gops_per_sec, gops_per_sec / peak))
t_func.export_library("tensorize_acc32.o")
verify()
if __name__ == "__main__":
# The test requires Cascade Lake and newer Intel machines to generate the
# correct AVX512 VNNI instruction. So, disabling the test.
# test_fc_int8_acc32()
pass
| [
"tvm.testing.device_enabled",
"tvm.te.placeholder",
"tvm.te.reduce_axis",
"numpy.random.uniform",
"tvm.nd.array",
"tvm.context",
"numpy.zeros",
"tvm.build",
"tvm.topi.x86.tensor_intrin.dot_16x1x16_uint8_int8_int32_cascadelake",
"tvm.te.create_schedule",
"numpy.dot",
"pytest.mark.skip"
] | [((1141, 1193), 'pytest.mark.skip', 'pytest.mark.skip', (['"""skip because feature not enabled"""'], {}), "('skip because feature not enabled')\n", (1157, 1193), False, 'import pytest\n'), ((1268, 1315), 'tvm.te.placeholder', 'te.placeholder', (['(m, k)'], {'name': '"""X"""', 'dtype': '"""uint8"""'}), "((m, k), name='X', dtype='uint8')\n", (1282, 1315), False, 'from tvm import te\n'), ((1324, 1370), 'tvm.te.placeholder', 'te.placeholder', (['(n, k)'], {'name': '"""W"""', 'dtype': '"""int8"""'}), "((n, k), name='W', dtype='int8')\n", (1338, 1370), False, 'from tvm import te\n'), ((1936, 1958), 'tvm.context', 'tvm.context', (['target', '(0)'], {}), '(target, 0)\n', (1947, 1958), False, 'import tvm\n'), ((1972, 2014), 'tvm.topi.x86.tensor_intrin.dot_16x1x16_uint8_int8_int32_cascadelake', 'dot_16x1x16_uint8_int8_int32_cascadelake', ([], {}), '()\n', (2012, 2014), False, 'from tvm.topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake\n'), ((2028, 2060), 'tvm.te.reduce_axis', 'te.reduce_axis', (['(0, k)'], {'name': '"""k"""'}), "((0, k), name='k')\n", (2042, 2060), False, 'from tvm import te\n'), ((2079, 2152), 'tvm.te.placeholder', 'te.placeholder', (['(n // 16, 16 * (k // 4), 4)'], {'name': '"""packedW"""', 'dtype': '"""int8"""'}), "((n // 16, 16 * (k // 4), 4), name='packedW', dtype='int8')\n", (2093, 2152), False, 'from tvm import te\n'), ((2362, 2389), 'tvm.te.create_schedule', 'te.create_schedule', (['t_fc.op'], {}), '(t_fc.op)\n', (2380, 2389), False, 'from tvm import te\n'), ((2844, 2906), 'tvm.build', 'tvm.build', (['t_sch', '[X, packedW, t_fc]', 'target'], {'name': '"""intrinsic"""'}), "(t_sch, [X, packedW, t_fc], target, name='intrinsic')\n", (2853, 2906), False, 'import tvm\n'), ((3593, 3614), 'tvm.nd.array', 'tvm.nd.array', (['a_', 'ctx'], {}), '(a_, ctx)\n', (3605, 3614), False, 'import tvm\n'), ((3627, 3651), 'tvm.nd.array', 'tvm.nd.array', (['packW', 'ctx'], {}), '(packW, ctx)\n', (3639, 3651), False, 'import tvm\n'), ((1802, 1836), 
'tvm.testing.device_enabled', 'tvm.testing.device_enabled', (['target'], {}), '(target)\n', (1828, 1836), False, 'import tvm\n'), ((3677, 3708), 'numpy.zeros', 'np.zeros', (['(m, n)'], {'dtype': '"""int32"""'}), "((m, n), dtype='int32')\n", (3685, 3708), True, 'import numpy as np\n'), ((3891, 3907), 'numpy.dot', 'np.dot', (['a_', 'b_.T'], {}), '(a_, b_.T)\n', (3897, 3907), True, 'import numpy as np\n'), ((3034, 3071), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(m, k)'}), '(1, 10, size=(m, k))\n', (3051, 3071), True, 'import numpy as np\n'), ((3101, 3138), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(n, k)'}), '(1, 10, size=(n, k))\n', (3118, 3138), True, 'import numpy as np\n'), ((3171, 3229), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)'], {'size': '(n // 16, 16 * (k // 4), 4)'}), '(1, 10, size=(n // 16, 16 * (k // 4), 4))\n', (3188, 3229), True, 'import numpy as np\n')] |
import logging
import os
from typing import Dict, List, Optional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import ticker
from matplotlib.ticker import MaxNLocator, ScalarFormatter
from tess_atlas.data import TICEntry
from tess_atlas.utils import NOTEBOOK_LOGGER_NAME
from .labels import (
ECCENTRICITY_PLOT,
LATEX,
PARAMS_CATEGORIES,
POSTERIOR_PLOT,
PRIOR_PLOT,
)
from .plotting_utils import format_prior_samples_and_initial_params
logger = logging.getLogger(NOTEBOOK_LOGGER_NAME)
def plot_priors(
tic_entry: TICEntry, prior_samples: Dict, init_params: Dict
) -> None:
prior_samples, init_params = format_prior_samples_and_initial_params(
prior_samples, init_params
)
samples_table = {}
samples_table["Noise Params"] = get_samples_from_param_regexs(
prior_samples, PARAMS_CATEGORIES["NOISE PARAMS"]
)
samples_table["Stellar Params"] = get_samples_from_param_regexs(
prior_samples, PARAMS_CATEGORIES["STELLAR PARAMS"]
)
samples_table[f"Period Params"] = get_samples_from_param_regexs(
prior_samples, PARAMS_CATEGORIES["PERIOD PARAMS"]
)
samples_table[f"Planet Params"] = get_samples_from_param_regexs(
prior_samples, PARAMS_CATEGORIES["PLANET PARAMS"]
)
try:
fig = plot_histograms(
samples_table, trues=init_params, latex_label=LATEX
)
fname = os.path.join(tic_entry.outdir, f"{PRIOR_PLOT}")
logger.debug(f"Saving {fname}")
fig.savefig(fname)
except Exception as e:
logger.error(f"Cant plot priors: {e}")
def get_samples_from_param_regexs(samples, param_regex):
samples_keys = samples.columns.values
data = {}
for s in samples_keys:
for regex in param_regex:
if regex == s or f"{regex}_" in s:
data[s] = samples[s]
return data
def plot_histograms(
samples_table: Dict[str, Dict[str, np.array]],
fname: Optional[str] = "",
trues: Optional[Dict] = {},
latex_label: Optional[Dict] = {},
) -> None:
nrows = len(samples_table.keys())
ncols = __get_longest_row_length(samples_table)
fig, axes = __create_fig(nrows, ncols)
for row_i, (set_label, sample_set) in enumerate(samples_table.items()):
axes[row_i, 0].set_title(set_label + ":", loc="left")
for col_i, (sample_name, samples) in enumerate(sample_set.items()):
__plot_hist1d(axes[row_i, col_i], samples)
if trues:
axes[row_i, col_i].axvline(trues[sample_name], color="C1")
__add_ax(axes[row_i, col_i])
axes[row_i, col_i].set_xlabel(
latex_label.get(sample_name, sample_name)
)
format_label_string_with_offset(axes[row_i, col_i], "x")
plt.tight_layout()
if fname:
fig.savefig(fname)
else:
return fig
def __plot_hist1d(ax, x):
range = np.quantile(x, [0.01, 0.99])
ax.hist(x, density=True, bins=20, range=range, histtype="step", color="C0")
ax.set_xlim(min(range), max(range))
def __create_fig(nrows, ncols):
xdim, ydim = ncols * 2.5, nrows * 2.5
fig, axes = plt.subplots(nrows, ncols, figsize=(xdim, ydim))
for i in range(nrows):
for j in range(ncols):
__remove_ax(axes[i, j])
return fig, axes
def __remove_ax(ax):
ax.set_yticks([])
ax.set_xticks([])
ax.tick_params(direction="in")
ax.set_frame_on(False)
def __add_ax(ax):
ax.xaxis.set_major_locator(MaxNLocator(3))
ax.tick_params(direction="in")
ax.set_frame_on(True)
def update_label(old_label, offset_text):
if offset_text == "":
return old_label
try:
units = old_label[old_label.index("[") + 1 : old_label.rindex("]")]
except ValueError:
units = ""
label = old_label.replace("[{}]".format(units), "")
if "+" in offset_text:
offset_text = "+" + str(int(float(offset_text.replace("+", ""))))
return "{} [{} {}]".format(label, offset_text, units)
def format_label_string_with_offset(ax, axis="both"):
"""Format the label string with the exponent from the ScalarFormatter"""
ax.ticklabel_format(axis=axis, style="sci", scilimits=(-1e4, 1e4))
axes_instances = []
if axis in ["x", "both"]:
axes_instances.append(ax.xaxis)
if axis in ["y", "both"]:
axes_instances.append(ax.yaxis)
for ax in axes_instances:
ax.major.formatter._useMathText = False
ax.major.formatter._useOffset = True
plt.draw() # Update the text
offset_text = ax.get_offset_text().get_text()
label = ax.get_label().get_text()
ax.offsetText.set_visible(False)
ax.set_label_text(update_label(label, offset_text))
def __get_longest_row_length(
samples_table: Dict[str, Dict[str, np.array]]
) -> int:
return max(
[
len(samples_dicts.keys())
for label, samples_dicts in samples_table.items()
]
)
| [
"matplotlib.pyplot.tight_layout",
"numpy.quantile",
"matplotlib.ticker.MaxNLocator",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.draw",
"os.path.join",
"logging.getLogger"
] | [((510, 549), 'logging.getLogger', 'logging.getLogger', (['NOTEBOOK_LOGGER_NAME'], {}), '(NOTEBOOK_LOGGER_NAME)\n', (527, 549), False, 'import logging\n'), ((2826, 2844), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2842, 2844), True, 'import matplotlib.pyplot as plt\n'), ((2955, 2983), 'numpy.quantile', 'np.quantile', (['x', '[0.01, 0.99]'], {}), '(x, [0.01, 0.99])\n', (2966, 2983), True, 'import numpy as np\n'), ((3196, 3244), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {'figsize': '(xdim, ydim)'}), '(nrows, ncols, figsize=(xdim, ydim))\n', (3208, 3244), True, 'import matplotlib.pyplot as plt\n'), ((1445, 1492), 'os.path.join', 'os.path.join', (['tic_entry.outdir', 'f"""{PRIOR_PLOT}"""'], {}), "(tic_entry.outdir, f'{PRIOR_PLOT}')\n", (1457, 1492), False, 'import os\n'), ((3540, 3554), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', (['(3)'], {}), '(3)\n', (3551, 3554), False, 'from matplotlib.ticker import MaxNLocator, ScalarFormatter\n'), ((4559, 4569), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (4567, 4569), True, 'import matplotlib.pyplot as plt\n')] |
'''
This is a plain consensus version modification on trainer.py
'''
import argparse
import asyncio
import os
import pickle
import sys
import time
import numpy as np
import resnet
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
sys.path.append('./distributed-learning/')
from model_statistics import ModelStatistics
from utils.consensus_tcp import ConsensusAgent
from prepare_agent_datasets import get_agent_train_loader, get_agent_val_loader
from consensus_master import TelemetryModelParameters, TelemetryAgentGeneralInfo
model_names = sorted(name for name in resnet.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("resnet")
and callable(resnet.__dict__[name]))
def make_config_parser():
parser = argparse.ArgumentParser(description='Proper ResNets for CIFAR10 in pytorch')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet20',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet20)')
# Arguments for consensus:
parser.add_argument('--agent-token', '-t', required=True, type=int)
parser.add_argument('--agent-host', default='127.0.0.1', type=str)
parser.add_argument('--agent-port', required=True, type=int)
parser.add_argument('--init-leader', dest='init_leader', action='store_true')
parser.add_argument('--master-host', default='127.0.0.1', type=str)
parser.add_argument('--master-port', required=True, type=int)
parser.add_argument('--enable-log', dest='logging', action='store_true')
parser.add_argument('--total-agents', required=True, type=int)
parser.add_argument('--debug-consensus', dest='debug', action='store_true')
parser.add_argument('--use-prepared-data', dest='data_prepared', action='store_true')
parser.add_argument('--consensus-freq', dest='consensus_frequency', type=int, default=1,
help='freq>0 -> do averaging <freq> times per batch, '
'freq<0 -> do averaging once per (-freq) batches')
# parser.add_argument('--use-consensus-rounds', dest='use_consensus_rounds', action='store_true')
# parser.add_argument('--consensus-rounds-precision', dest='consensus_rounds_precision', type=float, default=1e-4)
parser.add_argument('--no-validation', dest='no_validation', action='store_true')
parser.add_argument('--use-lsr', dest='use_lsr', action='store_true')
parser.add_argument('--warmup', dest='warmup', default=0, type=int)
parser.add_argument('--momentum-consensus', dest='momentum_consensus', action='store_true')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=32, type=int,
metavar='N', help='mini-batch size (default: 32)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=50, type=int,
metavar='N', help='print frequency (default: 50)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (d efault: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--half', dest='half', action='store_true',
help='use half-precision(16-bit) ')
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models',
default='save_temp', type=str)
parser.add_argument('--save-every', dest='save_every',
help='Saves checkpoints at every specified number of epochs',
type=int, default=10)
return parser
class ConsensusSpecific:
def __init__(self, cfg):
self.cfg = cfg
self.agent = None
self.agent_serve_task = None
self.run_averaging_exec_count = 0
self.batch_counter = 0
def init_consensus(self):
self.agent = ConsensusAgent(self.cfg.agent_token, self.cfg.agent_host, self.cfg.agent_port,
self.cfg.master_host, self.cfg.master_port,
debug=True if self.cfg.debug else False)
self.agent_serve_task = asyncio.create_task(self.agent.serve_forever())
print('{}: Created serving task'.format(self.cfg.agent_token))
def stop_consensus(self):
self.agent_serve_task.cancel()
def dump_params(self, model, optimizer_manager=None):
model_params = torch.cat([p.data.to(torch.float32).view(-1) for p in model.parameters()]).detach().clone().cpu().numpy()
if optimizer_manager is None:
return model_params
else:
optimizer_params = optimizer_manager.extract()
return np.concatenate([model_params, optimizer_params])
def load_params(self, model, params, optimizer_manager=None):
used_params = 0
for p in model.parameters():
cnt_params = p.numel()
p.data.copy_(torch.Tensor(params[used_params:used_params + cnt_params]).view(p.shape).to(p.dtype))
used_params += cnt_params
if optimizer_manager is not None:
optimizer_manager.set(params[used_params:])
async def run_averaging(self, model, optimizer=None):
if optimizer is not None:
optimizer_manager = MomentumBufferManager(optimizer)
else:
optimizer_manager = None
if self.cfg.consensus_frequency < 0:
if self.run_averaging_exec_count % (-self.cfg.consensus_frequency) == 0:
params = self.dump_params(model, optimizer_manager)
params = await self.agent.run_once(params)
self.load_params(model, params, optimizer_manager)
else:
params = self.dump_params(model, optimizer_manager)
for _ in range(self.cfg.consensus_frequency):
params = await self.agent.run_once(params)
self.load_params(model, params, optimizer_manager)
self.run_averaging_exec_count += 1
class MomentumBufferManager:
def __init__(self, optimizer):
self.optimizer = optimizer
self.shapes = []
self.sizes = []
def extract(self):
momentum_buffer_list = []
for group in self.optimizer.param_groups:
for p in group['params']:
if p.grad is not None:
state = self.optimizer.state[p]
if 'momentum_buffer' not in state:
raise ValueError('Initialize momentum buffer before extract them')
else:
momentum_buffer_list.append(state['momentum_buffer'])
extracted_buf = []
for buf in momentum_buffer_list:
np_buf = buf.clone().detach().cpu().numpy()
self.shapes.append(np_buf.shape)
self.sizes.append(np_buf.size)
extracted_buf.append(np_buf.reshape(-1,))
return np.concatenate(extracted_buf)
def set(self, values):
used_params = 0
momentum_buffer_list = []
for i in range(len(self.sizes)):
np_buf = values[used_params: used_params + self.sizes[i]].reshape(self.shapes[i])
buf = torch.Tensor(np_buf).cuda()
momentum_buffer_list.append(buf)
curr_id = 0
for group in self.optimizer.param_groups:
for p in group['params']:
if p.grad is not None:
self.optimizer.state[p]['momentum_buffer'] = momentum_buffer_list[curr_id].clone()
curr_id += 1
async def main(cfg):
best_prec1 = 0
torch.manual_seed(239)
print('Consensus agent: {}'.format(cfg.agent_token))
consensus_specific = ConsensusSpecific(cfg)
consensus_specific.init_consensus()
# Check the save_dir exists or not
cfg.save_dir = os.path.join(cfg.save_dir, str(cfg.agent_token))
if not os.path.exists(cfg.save_dir):
os.makedirs(cfg.save_dir)
model = torch.nn.DataParallel(resnet.__dict__[cfg.arch]())
model.cuda()
print('{}: Created model'.format(cfg.agent_token))
statistics = ModelStatistics(cfg.agent_token)
# optionally resume from a checkpoint
if cfg.resume:
if os.path.isfile(cfg.resume):
if cfg.logging:
print("=> loading checkpoint '{}'".format(cfg.resume))
checkpoint = torch.load(cfg.resume)
cfg.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
if 'statistics' in checkpoint.keys():
statistics = pickle.loads(checkpoint['statistics'])
elif os.path.isfile(os.path.join(cfg.resume, 'statistics.pickle')):
statistics = ModelStatistics.load_from_file(os.path.join(cfg.resume, 'statistics.pickle'))
model.load_state_dict(checkpoint['state_dict'])
if cfg.logging:
print("=> loaded checkpoint '{}' (epoch {})"
.format(cfg.evaluate, checkpoint['epoch']))
else:
if cfg.logging:
print("=> no checkpoint found at '{}'".format(cfg.resume))
cudnn.benchmark = True
print('{}: Loading dataset...'.format(cfg.agent_token))
train_loader = get_agent_train_loader(cfg.agent_token, cfg.batch_size)
print('{}: loaded {} batches for train'.format(cfg.agent_token, len(train_loader)))
val_loader = None if cfg.no_validation else get_agent_val_loader(cfg.agent_token)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
if cfg.half:
model.half()
criterion.half()
optimizer = torch.optim.SGD(model.parameters(), cfg.lr,
momentum=cfg.momentum,
weight_decay=cfg.weight_decay)
def lr_schedule(epoch):
if cfg.use_lsr and epoch < cfg.warmup:
factor = np.power(cfg.total_agents, epoch/cfg.warmup)
else:
factor = cfg.total_agents if cfg.use_lsr else 1.0
if epoch >= 81:
factor /= 10
if epoch >= 122:
factor /= 10
return factor
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_schedule)
if cfg.arch != 'resnet20':
print('This code was not intended to be used on resnets other than resnet20')
if cfg.arch in ['resnet1202', 'resnet110']:
# for resnet1202 original paper uses lr=0.01 for first 400 minibatches for warm-up
# then switch back. In this setup it will correspond for first epoch.
for param_group in optimizer.param_groups:
param_group['lr'] = cfg.lr * 0.1
if cfg.evaluate:
validate(cfg, val_loader, model, criterion)
return
await consensus_specific.agent.send_telemetry(TelemetryAgentGeneralInfo(batches_per_epoch=len(train_loader)))
for epoch in range(cfg.start_epoch, cfg.epochs):
# train for one epoch
if cfg.logging:
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
statistics.add('train_begin_timestamp', time.time())
await train(consensus_specific, train_loader, model, criterion, optimizer, epoch, statistics)
lr_scheduler.step()
statistics.add('train_end_timestamp', time.time())
# evaluate on validation set
statistics.add('validate_begin_timestamp', time.time())
prec1 = validate(cfg, val_loader, model, criterion)
statistics.add('validate_end_timestamp', time.time())
statistics.add('val_precision', prec1)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if epoch > 0 and epoch % cfg.save_every == 0:
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'statistics': pickle.dumps(statistics)
}, is_best, filename=os.path.join(cfg.save_dir, 'checkpoint.th'))
save_checkpoint({
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(cfg.save_dir, 'model.th'))
statistics.dump_to_file(os.path.join(cfg.save_dir, 'statistics.pickle'))
consensus_specific.stop_consensus()
async def train(consensus_specific, train_loader, model, criterion, optimizer, epoch, statistics):
"""
Run one train epoch
"""
cfg = consensus_specific.cfg
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
start = time.time()
end = time.time()
for i, (input, target) in enumerate(train_loader):
consensus_specific.batch_counter += 1
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda()
input_var = input.cuda()
target_var = target
if cfg.half:
input_var = input_var.half()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# average model
if cfg.momentum_consensus:
await consensus_specific.run_averaging(model, optimizer)
else:
await consensus_specific.run_averaging(model)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
await consensus_specific.agent.send_telemetry(TelemetryModelParameters(
consensus_specific.batch_counter,
consensus_specific.dump_params(model)
))
if i % cfg.print_freq == 0:
if cfg.logging:
print('\rEpoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1), end='')
if cfg.logging:
print('\nEpoch took {:.2f} s.'.format(end - start))
statistics.add('train_precision', top1.avg)
statistics.add('train_loss', losses.avg)
def validate(cfg, val_loader, model, criterion):
if cfg.no_validation or val_loader is None:
return -1.0
"""
Run evaluation
"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
target = target.cuda()
input_var = input.cuda()
target_var = target.cuda()
if cfg.half:
input_var = input_var.half()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % cfg.print_freq == 0:
if cfg.logging:
print('\rTest: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1), end='')
if cfg.logging:
print('\n * Prec@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""
Save the training model
"""
torch.save(state, filename)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
cfg = make_config_parser().parse_args()
asyncio.get_event_loop().run_until_complete(main(cfg))
| [
"argparse.ArgumentParser",
"os.path.isfile",
"torch.optim.lr_scheduler.LambdaLR",
"torch.no_grad",
"os.path.join",
"sys.path.append",
"model_statistics.ModelStatistics",
"numpy.power",
"torch.load",
"os.path.exists",
"torch.Tensor",
"pickle.dumps",
"pickle.loads",
"prepare_agent_datasets.g... | [((406, 448), 'sys.path.append', 'sys.path.append', (['"""./distributed-learning/"""'], {}), "('./distributed-learning/')\n", (421, 448), False, 'import sys\n'), ((977, 1053), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Proper ResNets for CIFAR10 in pytorch"""'}), "(description='Proper ResNets for CIFAR10 in pytorch')\n", (1000, 1053), False, 'import argparse\n'), ((8929, 8951), 'torch.manual_seed', 'torch.manual_seed', (['(239)'], {}), '(239)\n', (8946, 8951), False, 'import torch\n'), ((9435, 9467), 'model_statistics.ModelStatistics', 'ModelStatistics', (['cfg.agent_token'], {}), '(cfg.agent_token)\n', (9450, 9467), False, 'from model_statistics import ModelStatistics\n'), ((10561, 10616), 'prepare_agent_datasets.get_agent_train_loader', 'get_agent_train_loader', (['cfg.agent_token', 'cfg.batch_size'], {}), '(cfg.agent_token, cfg.batch_size)\n', (10583, 10616), False, 'from prepare_agent_datasets import get_agent_train_loader, get_agent_val_loader\n'), ((11492, 11559), 'torch.optim.lr_scheduler.LambdaLR', 'torch.optim.lr_scheduler.LambdaLR', (['optimizer'], {'lr_lambda': 'lr_schedule'}), '(optimizer, lr_lambda=lr_schedule)\n', (11525, 11559), False, 'import torch\n'), ((14040, 14051), 'time.time', 'time.time', ([], {}), '()\n', (14049, 14051), False, 'import time\n'), ((14062, 14073), 'time.time', 'time.time', ([], {}), '()\n', (14071, 14073), False, 'import time\n'), ((16535, 16546), 'time.time', 'time.time', ([], {}), '()\n', (16544, 16546), False, 'import time\n'), ((18009, 18036), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (18019, 18036), False, 'import torch\n'), ((5241, 5413), 'utils.consensus_tcp.ConsensusAgent', 'ConsensusAgent', (['self.cfg.agent_token', 'self.cfg.agent_host', 'self.cfg.agent_port', 'self.cfg.master_host', 'self.cfg.master_port'], {'debug': '(True if self.cfg.debug else False)'}), '(self.cfg.agent_token, self.cfg.agent_host, self.cfg.\n 
agent_port, self.cfg.master_host, self.cfg.master_port, debug=True if\n self.cfg.debug else False)\n', (5255, 5413), False, 'from utils.consensus_tcp import ConsensusAgent\n'), ((8260, 8289), 'numpy.concatenate', 'np.concatenate', (['extracted_buf'], {}), '(extracted_buf)\n', (8274, 8289), True, 'import numpy as np\n'), ((9217, 9245), 'os.path.exists', 'os.path.exists', (['cfg.save_dir'], {}), '(cfg.save_dir)\n', (9231, 9245), False, 'import os\n'), ((9255, 9280), 'os.makedirs', 'os.makedirs', (['cfg.save_dir'], {}), '(cfg.save_dir)\n', (9266, 9280), False, 'import os\n'), ((9541, 9567), 'os.path.isfile', 'os.path.isfile', (['cfg.resume'], {}), '(cfg.resume)\n', (9555, 9567), False, 'import os\n'), ((10753, 10790), 'prepare_agent_datasets.get_agent_val_loader', 'get_agent_val_loader', (['cfg.agent_token'], {}), '(cfg.agent_token)\n', (10773, 10790), False, 'from prepare_agent_datasets import get_agent_train_loader, get_agent_val_loader\n'), ((15182, 15193), 'time.time', 'time.time', ([], {}), '()\n', (15191, 15193), False, 'import time\n'), ((16556, 16571), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16569, 16571), False, 'import torch\n'), ((6048, 6096), 'numpy.concatenate', 'np.concatenate', (['[model_params, optimizer_params]'], {}), '([model_params, optimizer_params])\n', (6062, 6096), True, 'import numpy as np\n'), ((9693, 9715), 'torch.load', 'torch.load', (['cfg.resume'], {}), '(cfg.resume)\n', (9703, 9715), False, 'import torch\n'), ((10861, 10882), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (10880, 10882), True, 'import torch.nn as nn\n'), ((11230, 11276), 'numpy.power', 'np.power', (['cfg.total_agents', '(epoch / cfg.warmup)'], {}), '(cfg.total_agents, epoch / cfg.warmup)\n', (11238, 11276), True, 'import numpy as np\n'), ((12431, 12442), 'time.time', 'time.time', ([], {}), '()\n', (12440, 12442), False, 'import time\n'), ((12620, 12631), 'time.time', 'time.time', ([], {}), '()\n', (12629, 12631), False, 'import 
time\n'), ((12722, 12733), 'time.time', 'time.time', ([], {}), '()\n', (12731, 12733), False, 'import time\n'), ((12844, 12855), 'time.time', 'time.time', ([], {}), '()\n', (12853, 12855), False, 'import time\n'), ((13595, 13642), 'os.path.join', 'os.path.join', (['cfg.save_dir', '"""statistics.pickle"""'], {}), "(cfg.save_dir, 'statistics.pickle')\n", (13607, 13642), False, 'import os\n'), ((17309, 17320), 'time.time', 'time.time', ([], {}), '()\n', (17318, 17320), False, 'import time\n'), ((18953, 18977), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (18975, 18977), False, 'import asyncio\n'), ((9895, 9933), 'pickle.loads', 'pickle.loads', (["checkpoint['statistics']"], {}), "(checkpoint['statistics'])\n", (9907, 9933), False, 'import pickle\n'), ((13523, 13561), 'os.path.join', 'os.path.join', (['cfg.save_dir', '"""model.th"""'], {}), "(cfg.save_dir, 'model.th')\n", (13535, 13561), False, 'import os\n'), ((14236, 14247), 'time.time', 'time.time', ([], {}), '()\n', (14245, 14247), False, 'import time\n'), ((15149, 15160), 'time.time', 'time.time', ([], {}), '()\n', (15158, 15160), False, 'import time\n'), ((8529, 8549), 'torch.Tensor', 'torch.Tensor', (['np_buf'], {}), '(np_buf)\n', (8541, 8549), False, 'import torch\n'), ((9966, 10011), 'os.path.join', 'os.path.join', (['cfg.resume', '"""statistics.pickle"""'], {}), "(cfg.resume, 'statistics.pickle')\n", (9978, 10011), False, 'import os\n'), ((13280, 13304), 'pickle.dumps', 'pickle.dumps', (['statistics'], {}), '(statistics)\n', (13292, 13304), False, 'import pickle\n'), ((13338, 13381), 'os.path.join', 'os.path.join', (['cfg.save_dir', '"""checkpoint.th"""'], {}), "(cfg.save_dir, 'checkpoint.th')\n", (13350, 13381), False, 'import os\n'), ((17272, 17283), 'time.time', 'time.time', ([], {}), '()\n', (17281, 17283), False, 'import time\n'), ((10074, 10119), 'os.path.join', 'os.path.join', (['cfg.resume', '"""statistics.pickle"""'], {}), "(cfg.resume, 'statistics.pickle')\n", (10086, 
10119), False, 'import os\n'), ((6285, 6343), 'torch.Tensor', 'torch.Tensor', (['params[used_params:used_params + cnt_params]'], {}), '(params[used_params:used_params + cnt_params])\n', (6297, 6343), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""emnist_training.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1zOA0BJRrcOszo9kkTx5WIME5Ka7DfW0u
"""
from torchvision import datasets, transforms
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
import matplotlib.pyplot as plt # for plotting
import torch.optim as optim
import numpy as np
from torch.utils.data.sampler import SubsetRandomSampler
from plate_data_loader import *
from opencvPlateGurss import *
from torch.utils.data import TensorDataset, DataLoader
# define a 2-layer artificial neural network
class Emnist_net2(nn.Module):
    """Fully-connected baseline: 28x28 input -> 450 hidden units -> 47 classes."""

    def __init__(self):
        # BUG FIX: the original called super(Emnist_net, self).__init__(),
        # naming a *different* class, which raises TypeError at instantiation.
        super(Emnist_net2, self).__init__()
        self.name = "ANN"
        self.layer1 = nn.Linear(28 * 28, 450)  # flattened image -> hidden
        self.layer2 = nn.Linear(450, 47)       # hidden -> 47 EMNIST-balanced classes

    def forward(self, img):
        """Return raw class logits for a batch of 1x28x28 images."""
        flattened = img.view(-1, 28 * 28)
        activation1 = self.layer1(flattened)
        activation1 = F.relu(activation1)
        activation2 = self.layer2(activation1)
        return activation2
return activation2
# CNN classifier: expects 1-channel 280x280 inputs, outputs 47 class logits.
class Emnist_net(nn.Module):
    def __init__(self):
        super(Emnist_net, self).__init__()
        self.name = "CNN"
        self.conv1 = nn.Conv2d(1, 5, 5)   # 1 -> 5 channels, 5x5 kernel
        self.pool = nn.MaxPool2d(2, 2)    # halves each spatial dimension
        self.conv2 = nn.Conv2d(5, 10, 5)  # 5 -> 10 channels, 5x5 kernel
        # BUG FIX: in_features must match the flattened conv2 output used in
        # forward() (10 channels of 67x67), not 67*67*5.
        self.fc1 = nn.Linear(67 * 67 * 10, 2500)
        # BUG FIX: the original assigned fc1 twice, so self.fc2 never existed
        # and forward() crashed with AttributeError.
        self.fc2 = nn.Linear(2500, 47)

    def forward(self, x):
        """Return class logits for a batch of 1x280x280 images."""
        x = self.pool(F.relu(self.conv1(x)))   # 280 -> 276 -> 138
        x = self.pool(F.relu(self.conv2(x)))   # 138 -> 134 -> 67
        x = x.view(-1, 67 * 67 * 10)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
def get_data_loader(batch_size, split):
    """Build train/validation/test DataLoaders over EMNIST-balanced.

    The training set is shuffled once under a fixed seed and divided at
    ``split`` into train/validation samplers; the official EMNIST test
    split backs the test loader.
    """
    torch.manual_seed(1)  # reproducible sampling
    emnist_data = datasets.EMNIST('data', train=True, split="balanced",
                                  download=True, transform=transforms.ToTensor())
    # deterministic shuffle of the training indices
    np.random.seed(1000)
    order = np.arange(len(emnist_data))
    np.random.shuffle(order)
    cut = int(len(emnist_data) * split)
    train_sampler = SubsetRandomSampler(order[:cut])
    val_sampler = SubsetRandomSampler(order[cut:])
    train_loader = torch.utils.data.DataLoader(
        emnist_data, batch_size=batch_size, num_workers=0, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(
        emnist_data, batch_size=batch_size, num_workers=0, sampler=val_sampler)
    testset = datasets.EMNIST('data', train=False, split="balanced",
                              download=True, transform=transforms.ToTensor())
    test_loader = torch.utils.data.DataLoader(
        testset, batch_size=batch_size, num_workers=0)
    return train_loader, val_loader, test_loader
# Smoke test: build loaders (batch size 1, 70/30 split; downloads EMNIST on
# first run) and report how many batches each loader yields.
tr,v, te = get_data_loader(1, 0.7)
print(len(tr), len(v), len(te))
def get_accuracy(model, data_loader):
    """Return the fraction of correctly classified samples in *data_loader*.

    Args:
        model: network returning class logits of shape (batch, n_classes).
        data_loader: iterable of (images, labels) batches.

    Returns:
        Accuracy in [0, 1]; 0.0 for an empty loader.
    """
    correct = 0
    total = 0
    # Pure evaluation: disable autograd bookkeeping to save memory and time.
    with torch.no_grad():
        for imgs, labels in data_loader:
            if use_cuda and torch.cuda.is_available():
                imgs = imgs.cuda()
                labels = labels.cuda()
            output = model(imgs)
            # index of the maximum logit is the predicted class
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(labels.view_as(pred)).sum().item()
            total += imgs.shape[0]
    # ROBUSTNESS: avoid ZeroDivisionError on an empty loader.
    return correct / total if total else 0.0
def train(model, lr, batch_size, epochs, split = 0.8):
    """Train *model* on EMNIST-balanced, plot curves, and save a checkpoint.

    Args:
        model: network to optimise (must expose a ``name`` attribute).
        lr: Adam learning rate.
        batch_size: mini-batch size for the loaders.
        epochs: number of passes over the training split.
        split: fraction of the training data used for training (rest = val).
    """
    train_loader, val_loader, test_loader = get_data_loader(batch_size, split)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)
    epoch = []
    train_acc, val_acc, losses = [], [], []
    for epo in range(epochs):
        for imgs, labels in iter(train_loader):
            if use_cuda and torch.cuda.is_available():
                imgs = imgs.cuda()
                labels = labels.cuda()
                model.cuda()
            out = model(imgs)              # forward pass
            loss = criterion(out, labels)  # total loss for this batch
            loss.backward()                # backward pass (parameter grads)
            optimizer.step()               # apply the updates
            optimizer.zero_grad()          # reset grads for the next batch
        # FIX: record a plain float, not the loss tensor, so the autograd
        # graph of every batch is not kept alive for the whole run.
        losses.append(loss.item())
        epoch.append(epo)
        train_acc.append(get_accuracy(model, train_loader))
        val_acc.append(get_accuracy(model, val_loader))
        print("epoch:", epo, train_acc[-1], val_acc[-1])
    # BUG FIX: the original referenced an undefined global ``e_net`` here;
    # use the trained ``model`` that was passed in.
    model_path = "model_{0}_bs{1}_lr{2}_epoch{3}".format(model.name,
                                                         batch_size,
                                                         lr,
                                                         epochs)
    torch.save(model.state_dict(), model_path)
    # ``epo`` below is the last epoch index from the loop above.
    plt.title("Training Curve learning rate:{}, epo:{}, batch_size:{}".format(lr, epo, batch_size))
    plt.plot(losses, label="Train")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.show()
    plt.title("Training Curve learning rate:{}, epo:{}, batch_size:{}".format(lr, epo, batch_size))
    plt.plot(epoch, train_acc, label="Train")
    plt.plot(epoch, val_acc, label="Validation")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.legend(loc='best')
    plt.show()
class Dataloader2(Dataset):
    """Dataset of licence-plate images listed in a CSV file.

    Each CSV row is (track_id, image_path, lp, train); only plates whose
    text is exactly 7 characters long are kept.  Items are
    (image_tensor, label) pairs where the label is either the plate string
    or a stack of one-hot rows, depending on ``one_hot``.
    """

    def __init__(self, csv_path, transform = None, datasetType = 0, one_hot = True):
        """
        Args:
            csv_path (string): path to csv file
            transform: pytorch transforms applied to each image (or None)
            datasetType: 0 = train (first 3/4 of the train-flagged rows),
                1 = validation (last 1/4 of the train-flagged rows),
                2 = test (rows flagged train == 0)
            one_hot: whether the label is a one-hot tensor or the plate string
        """
        # One hot list as label?
        self.one_hot = one_hot
        # Read the csv file
        pf = pd.read_csv(csv_path)
        # Filter the data:
        # Only use 7 digits plates as datasets
        sevenLengthPf = pf[pf.iloc[:, 2].str.len() == 7]
        # Load train/test data
        self.datasetType = datasetType
        if self.datasetType == 0: # Train: first 3/4 of the train-flagged rows
            tmp = sevenLengthPf[sevenLengthPf.iloc[:, 3] == 1]
            self.data_info = tmp.iloc[:int(3*len(tmp)/4), :]
        elif self.datasetType == 1: # Val: remaining 1/4 of the train-flagged rows
            tmp = sevenLengthPf[sevenLengthPf.iloc[:, 3] == 1]
            self.data_info = tmp.iloc[int(3*len(tmp)/4):, :]
        elif self.datasetType == 2: # Test: rows flagged train == 0
            self.data_info = sevenLengthPf[sevenLengthPf.iloc[:, 3] == 0]
        # First column contains the image paths
        self.paths = np.asarray(self.data_info.iloc[:, 1])
        # Second column is the labels
        self.labels = np.asarray(self.data_info.iloc[:, 2])
        # Third column is for an train Boolean
        self.trainBools = np.asarray(self.data_info.iloc[:, 3])
        # Calculate len
        self.data_len = len(self.data_info.index)
        # Transform function
        self.transform = transform
        # Transform to tensor
        self.to_tensor = transforms.ToTensor()

    def __getitem__(self, index):
        """Return (image_tensor, label) for the sample at *index*."""
        # Get image name from the pandas df
        imageName = self.paths[index]
        # Image paths in the CSV are relative to the dataset directory, which
        # is expected to live next to this file's parent directory.
        dirname = os.path.dirname(__file__)
        image_path = os.path.join(dirname, '..//Picture//2017-IWT4S-CarsReId_LP-dataset', imageName)
        # Open image
        img = Image.open(image_path)
        # Transform image to tensor
        if self.transform !=None:
            img = self.transform(img)
        imgTensor = self.to_tensor(img)
        # Get license plate number
        if(self.one_hot == False):
            label = self.labels[index]
        else:
            # Use one_hot: map each plate character to its index in the
            # 36-symbol alphabet (digits then uppercase letters), then look
            # up the corresponding rows of a 36x36 identity matrix.
            alphaNumerical_Types = ('0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')
            listOfPlate = []
            for alphaNumerical in self.labels[index]:
                place = alphaNumerical_Types.index(alphaNumerical)
                if place >=0 and place <= 35:
                    # oneHotList = [0] * 36
                    # oneHotList[place] = 1
                    listOfPlate.append(place)
            # import pdb; pdb.set_trace()
            ident = torch.eye(36)
            # label is a (7, 36) tensor: one one-hot row per character
            label = ident[torch.tensor(listOfPlate)]
            # label = listOfPlate
        return (imgTensor, label)

    def __len__(self):
        """Return the number of samples in the selected split."""
        return self.data_len
# Module-level accumulators shared with check() below:
tmp_x=[]  # extracted character crops (only filled by code now commented out)
tmp_y=[]  # matching character labels (only filled by code now commented out)
no_le=0   # count of images where segmentation found no characters
le7=0     # count of images with fewer than 7 characters found
def check(Dataloader2, index):
    """Run the OpenCV plate segmenter on sample *index* and tally results.

    Increments the module-level counters ``no_le`` (no characters found)
    and ``le7`` (fewer than seven characters found).

    Args:
        Dataloader2: dataset instance exposing ``paths``/``labels`` arrays.
        index: sample position within the dataset.

    Returns:
        1 when exactly seven character crops were extracted, else 0
        (including when segmentation raises).
    """
    global no_le, le7
    # Get image name from the pandas df
    imageName = Dataloader2.paths[index]
    dirname = os.path.dirname(__file__)
    image_path = os.path.join(dirname, '..//Picture//2017-IWT4S-CarsReId_LP-dataset', imageName)
    # Open image
    img = cv2.imread(image_path)
    try:
        outputs, licenseGuess = slicePic(img)
    except Exception:
        # FIX: the original bare ``except:`` also swallowed KeyboardInterrupt
        # and SystemExit; a failed segmentation simply doesn't count.
        return 0
    if len(outputs) == 0:
        no_le = no_le + 1
    elif len(outputs) < 7:
        le7 = le7 + 1
    if len(outputs) == 7:
        return 1
    return 0
use_cuda = False  # flip to True to move model/data to the GPU in train()/get_accuracy()
#print(torch.cuda.is_available())
#train(e_net,lr = 0.0001, batch_size = 32, epochs = 30)
#!unzip '/content/drive/My Drive/Colab Notebooks/APS360 LAB/project/imgs.zip' -d '/root/datasets'
#data_dir = "/root/datasets"
#data_transform = transforms.Compose([transforms.Resize((28,28)), transforms.Grayscale(num_output_channels=1),
#                                     transforms.ToTensor()])
#test_set = datasets.ImageFolder(data_dir, transform=data_transform)
#test_loader = torch.utils.data.DataLoader(test_set, batch_size=1,
#                                          num_workers=0, shuffle=True)
#Below is extract images from opencv
#train_loader, val_loader, test_loader = get_data_loader(32, 0.8)
# load csv
header = ['track_id', 'image_path', 'lp', 'train']
dirname = os.path.dirname(__file__)
# plates are resized to 50x140 before character slicing
data_transform = transforms.Compose([transforms.Resize((50,140))])
filename = os.path.join(dirname, '..//Picture//2017-IWT4S-CarsReId_LP-dataset//trainVal.csv')
train_data = Dataloader2(filename, transform=data_transform, datasetType = 0, one_hot = False)
print(train_data.labels[10])
count=0
#76032
# Run the OpenCV segmenter over the first 10000 training plates and count
# how many yield exactly 7 character crops (check() returns 1 for those).
for i in range(10000):
    count=count+ check(train_data,i)
    print("Total loop:",i)
    print(count)
print(count)
print(no_le)
print(le7)
#print(len(tmp_x))
#print((tmp_x[0].shape))
#print(len(tmp_y))
#ent=Emnist_net()
#train(ent, lr, batch_size, 30, split = 0.8)
| [
"numpy.random.seed",
"torch.eye",
"torch.utils.data.DataLoader",
"torch.nn.functional.relu",
"torch.nn.Linear",
"numpy.random.shuffle",
"matplotlib.pyplot.show",
"torch.manual_seed",
"matplotlib.pyplot.legend",
"torch.nn.Conv2d",
"numpy.asarray",
"torch.cuda.is_available",
"torch.nn.MaxPool2... | [((1796, 1816), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (1813, 1816), False, 'import torch\n'), ((2109, 2129), 'numpy.random.seed', 'np.random.seed', (['(1000)'], {}), '(1000)\n', (2123, 2129), True, 'import numpy as np\n'), ((2171, 2196), 'numpy.random.shuffle', 'np.random.shuffle', (['indice'], {}), '(indice)\n', (2188, 2196), True, 'import numpy as np\n'), ((2325, 2357), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_index'], {}), '(train_index)\n', (2344, 2357), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((2375, 2481), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['emnist_data'], {'batch_size': 'batch_size', 'num_workers': '(0)', 'sampler': 'train_sampler'}), '(emnist_data, batch_size=batch_size, num_workers\n =0, sampler=train_sampler)\n', (2402, 2481), False, 'import torch\n'), ((2496, 2526), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['val_index'], {}), '(val_index)\n', (2515, 2526), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((2542, 2646), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['emnist_data'], {'batch_size': 'batch_size', 'num_workers': '(0)', 'sampler': 'val_sampler'}), '(emnist_data, batch_size=batch_size, num_workers\n =0, sampler=val_sampler)\n', (2569, 2646), False, 'import torch\n'), ((2931, 3005), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'batch_size', 'num_workers': '(0)'}), '(testset, batch_size=batch_size, num_workers=0)\n', (2958, 3005), False, 'import torch\n'), ((3812, 3833), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3831, 3833), True, 'import torch.nn as nn\n'), ((5031, 5062), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {'label': '"""Train"""'}), "(losses, label='Train')\n", (5039, 5062), True, 'import matplotlib.pyplot as plt\n'), ((5065, 5084), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (5075, 5084), True, 'import matplotlib.pyplot as plt\n'), ((5087, 5105), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (5097, 5105), True, 'import matplotlib.pyplot as plt\n'), ((5108, 5118), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5116, 5118), True, 'import matplotlib.pyplot as plt\n'), ((5220, 5261), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch', 'train_acc'], {'label': '"""Train"""'}), "(epoch, train_acc, label='Train')\n", (5228, 5261), True, 'import matplotlib.pyplot as plt\n'), ((5264, 5308), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch', 'val_acc'], {'label': '"""Validation"""'}), "(epoch, val_acc, label='Validation')\n", (5272, 5308), True, 'import matplotlib.pyplot as plt\n'), ((5311, 5330), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (5321, 5330), True, 'import matplotlib.pyplot as plt\n'), ((5333, 5355), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (5343, 5355), True, 'import matplotlib.pyplot as plt\n'), ((5358, 5380), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5368, 5380), True, 'import matplotlib.pyplot as plt\n'), ((5383, 5393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5391, 5393), True, 'import matplotlib.pyplot as plt\n'), ((818, 841), 'torch.nn.Linear', 'nn.Linear', (['(28 * 28)', '(450)'], {}), '(28 * 28, 450)\n', (827, 841), True, 'import torch.nn as nn\n'), ((864, 882), 'torch.nn.Linear', 'nn.Linear', (['(450)', '(47)'], {}), '(450, 47)\n', (873, 882), True, 'import torch.nn as nn\n'), ((1021, 1040), 'torch.nn.functional.relu', 'F.relu', (['activation1'], {}), '(activation1)\n', (1027, 1040), True, 'import torch.nn.functional as F\n'), ((1306, 1324), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(5)', '(5)'], {}), '(1, 5, 5)\n', (1315, 1324), True, 'import torch.nn as nn\n'), 
((1388, 1406), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (1400, 1406), True, 'import torch.nn as nn\n'), ((1428, 1447), 'torch.nn.Conv2d', 'nn.Conv2d', (['(5)', '(10)', '(5)'], {}), '(5, 10, 5)\n', (1437, 1447), True, 'import torch.nn as nn\n'), ((1467, 1495), 'torch.nn.Linear', 'nn.Linear', (['(67 * 67 * 5)', '(2500)'], {}), '(67 * 67 * 5, 2500)\n', (1476, 1495), True, 'import torch.nn as nn\n'), ((1511, 1530), 'torch.nn.Linear', 'nn.Linear', (['(2500)', '(47)'], {}), '(2500, 47)\n', (1520, 1530), True, 'import torch.nn as nn\n'), ((6660, 6697), 'numpy.asarray', 'np.asarray', (['self.data_info.iloc[:, 1]'], {}), '(self.data_info.iloc[:, 1])\n', (6670, 6697), True, 'import numpy as np\n'), ((6759, 6796), 'numpy.asarray', 'np.asarray', (['self.data_info.iloc[:, 2]'], {}), '(self.data_info.iloc[:, 2])\n', (6769, 6796), True, 'import numpy as np\n'), ((6871, 6908), 'numpy.asarray', 'np.asarray', (['self.data_info.iloc[:, 3]'], {}), '(self.data_info.iloc[:, 3])\n', (6881, 6908), True, 'import numpy as np\n'), ((7105, 7126), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7124, 7126), False, 'from torchvision import datasets, transforms\n'), ((10886, 10914), 'torchvision.transforms.Resize', 'transforms.Resize', (['(50, 140)'], {}), '((50, 140))\n', (10903, 10914), False, 'from torchvision import datasets, transforms\n'), ((1938, 1959), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1957, 1959), False, 'from torchvision import datasets, transforms\n'), ((2782, 2803), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2801, 2803), False, 'from torchvision import datasets, transforms\n'), ((3328, 3353), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3351, 3353), False, 'import torch\n'), ((8429, 8442), 'torch.eye', 'torch.eye', (['(36)'], {}), '(36)\n', (8438, 8442), False, 'import torch\n'), ((4033, 4058), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (4056, 4058), False, 'import torch\n'), ((8469, 8494), 'torch.tensor', 'torch.tensor', (['listOfPlate'], {}), '(listOfPlate)\n', (8481, 8494), False, 'import torch\n')] |
from csaf.utils.app import CsafApp
from csaf_examples.rejoin import generate_dubins_system, plot_aircrafts
from csaf_examples.cansat import generate_cansat_system, plot_sats
import numpy as np
if __name__ == '__main__':
    descr = "CSAF Examples Systems Viewer"

    # chaser satellite initial states: [x, y, vx, vy]
    chaser_states = [
        [10, -10, -2.0, 2.1],
        [10, -7, 0.7, 0.0],
        [-12, -7, -0.3, 1.0],
        [10, 0, -0.2, .1],
        [5, 5, .4, -0.2],
        [-5, 1, 0.0, 0.0],
    ]
    cansat_system = generate_cansat_system(chaser_states)

    # aircraft initial states: [x, y, heading in radians]
    aircraft_states = [
        [0, 0, np.deg2rad(45)],
        [-5, -10, np.deg2rad(-30)],
        [-3, -15, np.deg2rad(90)],
        [0, -20, np.deg2rad(0)],
    ]
    rejoin_system = generate_dubins_system(aircraft_states)

    # map each system class to its dedicated plotting routine
    plotters = {cansat_system.__class__: plot_sats,
                rejoin_system.__class__: plot_aircrafts}
    capp = CsafApp("CSAF Examples", description=descr,
                   systems=[cansat_system, rejoin_system], plotters=plotters)
    capp.main()
| [
"csaf_examples.rejoin.generate_dubins_system",
"csaf_examples.cansat.generate_cansat_system",
"numpy.deg2rad",
"csaf.utils.app.CsafApp"
] | [((540, 574), 'csaf_examples.cansat.generate_cansat_system', 'generate_cansat_system', (['sat_states'], {}), '(sat_states)\n', (562, 574), False, 'from csaf_examples.cansat import generate_cansat_system, plot_sats\n'), ((753, 785), 'csaf_examples.rejoin.generate_dubins_system', 'generate_dubins_system', (['j_states'], {}), '(j_states)\n', (775, 785), False, 'from csaf_examples.rejoin import generate_dubins_system, plot_aircrafts\n'), ((929, 1020), 'csaf.utils.app.CsafApp', 'CsafApp', (['"""CSAF Examples"""'], {'description': 'descr', 'systems': 'example_systems', 'plotters': 'plotters'}), "('CSAF Examples', description=descr, systems=example_systems,\n plotters=plotters)\n", (936, 1020), False, 'from csaf.utils.app import CsafApp\n'), ((598, 612), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (608, 612), True, 'import numpy as np\n'), ((639, 654), 'numpy.deg2rad', 'np.deg2rad', (['(-30)'], {}), '(-30)\n', (649, 654), True, 'import numpy as np\n'), ((679, 693), 'numpy.deg2rad', 'np.deg2rad', (['(90)'], {}), '(90)\n', (689, 693), True, 'import numpy as np\n'), ((717, 730), 'numpy.deg2rad', 'np.deg2rad', (['(0)'], {}), '(0)\n', (727, 730), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
""" Animations of the form z = f(x, y, t).
"""
import time
import numpy as np
from ..engine import Animation
from ..engine import Sprite
def frange(x=(0, 1), y=(0, 1), z=(0, 1)):
    """Decorator that attaches x/y/z value ranges to a function.

    The wrapped function gains ``range_x``, ``range_y`` and ``range_z``
    attributes and is otherwise returned unchanged.
    """
    def decorator(func):
        func.range_x = x
        func.range_y = y
        func.range_z = z
        return func
    return decorator
class Fxyt(Sprite):
    """Sprite that renders z = f(x, y, t) sampled on an 8x8 grid."""

    def __init__(self, f):
        """Precompute sampling grids from *f*'s declared x/y/z ranges."""
        self.f = f
        # 8x8 sample coordinates spanning the function's x/y ranges
        self.grid = np.mgrid[
            f.range_x[0]:f.range_x[1]:8j,
            f.range_y[0]:f.range_y[1]:8j]
        self.x = self.grid[0, :, :]
        self.y = self.grid[1, :, :]
        # matching integer indices used to address the frame buffer
        self.gridi = np.mgrid[0:8, 0:8]
        self.xi = self.gridi[0, :, :]
        self.yi = self.gridi[1, :, :]
        # linear map from [z_min, z_max] onto the 8 display layers
        self.z_min = f.range_z[0]
        self.z_resize = (f.range_z[1] - f.range_z[0]) / 8.

    def render(self, frame):
        """Light one voxel per grid point at height f(x, y, now)."""
        t = time.time()
        z = self.f(self.x, self.y, t)
        z = (z - self.z_min) / self.z_resize
        # BUG FIX: ``np.int`` was removed in NumPy 1.24; use the builtin int
        # dtype, which is what the old alias resolved to.
        zi = np.floor(z).astype(int).clip(0, 7)
        frame[zi, self.yi, self.xi] = 255
class FxytMexicanHat(Animation):
    """Animation of a damped sinc ("mexican hat") surface, z = A*sin(R)/R."""

    ANIMATION = __name__ + ".mexican_hat"
    ARGS = {
    }

    def post_init(self):
        # wrap the decorated height function in an Fxyt sprite for rendering
        self.fxyt = Fxyt(self.f)

    def render(self, frame):
        self.fxyt.render(frame)

    @frange(x=(-8, 8), y=(-8, 8), z=(-0.2, 0.6))
    def f(self, x, y, t):
        R = np.sqrt(x**2 + y**2) + 0.01  # radius, offset to avoid division by zero
        A = np.sin(t)**2 + 0.01  # amplitude pulses over time
        return A * np.sin(R) / R
class FxytWaveY(FxytMexicanHat):
    """Plane wave travelling along the y axis."""

    ANIMATION = __name__ + ".wavey"

    @frange(x=(-np.pi, np.pi), y=(-np.pi, np.pi), z=(-1, 1))
    def f(self, x, y, t):
        phase = 1.5 * t
        return np.sin(y + phase)
class FxytWaveXY(FxytMexicanHat):
    """Product of x- and y-direction waves (egg-crate pattern)."""

    ANIMATION = __name__ + ".wavexy"

    @frange(x=(-np.pi/2, np.pi/2), y=(-np.pi/2, np.pi/2), z=(-1, 1))
    def f(self, x, y, t):
        phase = 1.5 * t
        return np.sin(x + phase) * np.cos(y + phase)
class FxytRotatingPlane(FxytMexicanHat):
    """Plane tilting around the origin as time advances."""

    ANIMATION = __name__ + ".rotplane"

    @frange(x=(0, 1), y=(-1, 1), z=(-2, 2))
    def f(self, x, y, t):
        angle = 1.5 * t
        return y * np.sin(angle) + x * np.cos(angle)
class FxytRotatingParabaloid(FxytMexicanHat):
    """Downward parabolic valley rotating about the z axis."""

    ANIMATION = __name__ + ".rotparab"

    @frange(x=(-1, 1), y=(-1, 1), z=(-1, 0))
    def f(self, x, y, t, s=0.7):
        # squared, negated projection onto the rotating direction
        angle = s * t
        proj = x * np.sin(angle) + y * np.cos(angle)
        return -(proj ** 2)
class FxytBreather(Animation):
    """Two mirrored cones z = +/- r * sin(t/2) that "breathe" in antiphase."""

    ANIMATION = __name__ + ".breather"
    ARGS = {
    }

    def post_init(self):
        # one sprite per surface; both are drawn into the same frame
        self.fxyt_1 = Fxyt(self.f_1)
        self.fxyt_2 = Fxyt(self.f_2)

    def render(self, frame):
        self.fxyt_1.render(frame)
        self.fxyt_2.render(frame)

    @frange(x=(-1, 1), y=(-1, 1), z=(-1.24, 1.6))
    def f_1(self, x, y, t):
        return np.sqrt(x**2 + y**2) * np.sin(0.5 * t)

    # z range is evaluated once, at class-creation time
    @frange(x=(-1, 1), y=(-1, 1), z=(-np.sqrt(2), np.sqrt(2)))
    def f_2(self, x, y, t):
        return - np.sqrt(x**2 + y**2) * np.sin(0.5 * t)
| [
"numpy.floor",
"time.time",
"numpy.sin",
"numpy.cos",
"numpy.sqrt"
] | [((890, 901), 'time.time', 'time.time', ([], {}), '()\n', (899, 901), False, 'import time\n'), ((1652, 1671), 'numpy.sin', 'np.sin', (['(y + 1.5 * t)'], {}), '(y + 1.5 * t)\n', (1658, 1671), True, 'import numpy as np\n'), ((1384, 1408), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (1391, 1408), True, 'import numpy as np\n'), ((1857, 1876), 'numpy.sin', 'np.sin', (['(x + 1.5 * t)'], {}), '(x + 1.5 * t)\n', (1863, 1876), True, 'import numpy as np\n'), ((1879, 1898), 'numpy.cos', 'np.cos', (['(y + 1.5 * t)'], {}), '(y + 1.5 * t)\n', (1885, 1898), True, 'import numpy as np\n'), ((2723, 2747), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2730, 2747), True, 'import numpy as np\n'), ((2746, 2761), 'numpy.sin', 'np.sin', (['(0.5 * t)'], {}), '(0.5 * t)\n', (2752, 2761), True, 'import numpy as np\n'), ((2894, 2909), 'numpy.sin', 'np.sin', (['(0.5 * t)'], {}), '(0.5 * t)\n', (2900, 2909), True, 'import numpy as np\n'), ((1424, 1433), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1430, 1433), True, 'import numpy as np\n'), ((1463, 1472), 'numpy.sin', 'np.sin', (['R'], {}), '(R)\n', (1469, 1472), True, 'import numpy as np\n'), ((2072, 2087), 'numpy.sin', 'np.sin', (['(1.5 * t)'], {}), '(1.5 * t)\n', (2078, 2087), True, 'import numpy as np\n'), ((2094, 2109), 'numpy.cos', 'np.cos', (['(1.5 * t)'], {}), '(1.5 * t)\n', (2100, 2109), True, 'import numpy as np\n'), ((2871, 2895), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2878, 2895), True, 'import numpy as np\n'), ((2813, 2823), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2820, 2823), True, 'import numpy as np\n'), ((2801, 2811), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2808, 2811), True, 'import numpy as np\n'), ((998, 1009), 'numpy.floor', 'np.floor', (['z'], {}), '(z)\n', (1006, 1009), True, 'import numpy as np\n'), ((2299, 2312), 'numpy.sin', 'np.sin', (['(s * t)'], {}), '(s * t)\n', (2305, 2312), True, 
'import numpy as np\n'), ((2319, 2332), 'numpy.cos', 'np.cos', (['(s * t)'], {}), '(s * t)\n', (2325, 2332), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : inference.py
@Contact : <EMAIL>
@License : (C)Copyright UCSD & Xing
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
03/03/2021 10:18 Xing 1.0 Initial Framework Generation
03/03/2021 12:20 Xing 1.1 Most Done!
03/08/2021 18:19 Xing 1.2 Clean up annotations
'''
import cv2
import numpy as np
import nibabel as nib
import torch
from torch import nn
import torch.nn.functional as F
import argparse
import os
from resnet_3d_cbam import *
from skimage.filters import threshold_otsu, gaussian
def parse_arg(argv=None):
    """Parse command-line options for the TB2020 inference script.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (backward compatible with the original zero-argument call).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='UCSD ImageClef2020')
    # FIX: Windows-path defaults are raw strings so backslashes are taken
    # literally; the originals emitted invalid-escape-sequence warnings on
    # modern Python (values are unchanged).
    parser.add_argument('--img_pth', help='image dir',
                        default=r'E:\Xing\TB2020\data\Test_data\CTR_TST_data', type=str)
    parser.add_argument('--msk1_pth', help='mask1 dir',
                        default=r'E:\Xing\TB2020\data\Test_data\CTR_TST_masks1', type=str)
    parser.add_argument('--msk2_pth', help='mask2 dir',
                        default=r'E:\Xing\TB2020\data\Test_data\CTR_TST_masks2', type=str)
    parser.add_argument('--img_id', help='image_id', default='CTR_TST_001.nii.gz', type=str)
    parser.add_argument('--base_dir', help='base dir',
                        default=r'E:\Xing\TB2020\data\Test_data', type=str)
    parser.add_argument('--model_path', help='model dir',
                        default=r'C:\Users\Xing\Projects\TB2020\train_log\Jun20_pw_resnet_cbam_bsmp_wfocal_fulldata\Sun21Jun2020-170404\save', type=str)
    parser.add_argument('--model_name', help='base dir', default=r'\best_model_auc.pth', type=str)
    parser.add_argument('--used_gpu', help='base dir', default='1,2', type=str)
    args = parser.parse_args(argv)
    return args
class data_preprocess_config(object):
    """Bundle the path-related CLI options consumed by data_preprocess."""

    def __init__(self, args):
        """Copy the relevant path fields from a parsed argparse namespace."""
        for field in ('img_id', 'msk1_pth', 'msk2_pth', 'img_pth', 'base_dir'):
            setattr(self, field, getattr(args, field))
class data_preprocess(object):
def __init__(self,config):
self.config = config
def normalize_window(self, image_array, lower_sigma=2, upper_sigma=4, bg_thresh=None, bg_percentile=20, window='lung'):
# select the fg pixels
thresh_w = {
'lung': [-600, 1500],
'soft_tissue': [50, 350],
'bone': [400, 1800]
}
image_array = image_array.astype(np.float)
bg_thresh = threshold_otsu(image_array)
# print('background threshold {}'.format(bg_thresh))
image_array_fg = image_array[image_array > bg_thresh]
# select 5 pct to 95 pct to perform robust normalization
if window == 'normal':
pct_5 = np.percentile(image_array_fg, 5)
pct_95 = np.percentile(image_array_fg, 95)
else:
pct_5 = thresh_w[window][0]
pct_95 = thresh_w[window][1]
image_array_fg_robust = image_array_fg[(image_array_fg > pct_5) & (image_array_fg < pct_95)]
std = np.std(image_array_fg_robust)
mean = np.mean(image_array_fg_robust)
# set (mean - lower_sigma * std) to 0, and (mean + upper_sigma * std) to 1
a_min = mean - lower_sigma * std
a_max = mean + upper_sigma * std
# set bg pixels to a_min. Sometimes bg_threshold > a_min
image_array[image_array <= bg_thresh] = a_min
# clip
image_array_clipped = np.clip(image_array, a_min=a_min, a_max=a_max)
image_array_clipped = (image_array_clipped - a_min) / (a_max - a_min)
return image_array_clipped
def find_bound(self,msk, task='R'):
# take upper part as R
x, y = msk.shape
img_binary = (msk > 0).astype(np.uint8)
# plt.imshow(img_binary)
g = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
img_open = cv2.morphologyEx(img_binary, cv2.MORPH_OPEN, g)
contours, hierarchy = cv2.findContours(img_open, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
area = 0
area_max = 0
for i, c in enumerate(contours):
area = cv2.contourArea(c)
if area_max < area:
area_max = area
c_max = c
if len(contours) > 0:
y_min = min(c_max[:, :, 1])[0]
y_max = max(c_max[:, :, 1])[0]
else:
y_min = y
y_max = 0 # this is a trick to avoid the interuptions.
if task == 'R':
return y_max
else:
return y_min
def msk_new(self, msk_s,msk_b,task = 'R'):
x,y,z = msk_b.shape
new_msk = np.zeros([x,y,z])
for i in range(z):
bound = self.find_bound(msk_s[:,:,i],task = task)
# print(i,bound)
if task == 'R':
new_msk[:bound,:,i] = msk_b[:bound,:,i]
else:
new_msk[bound:,:,i] = msk_b[bound:,:,i]
return new_msk
def img_crop_3d(self, image, msk, margin=2, ds=4, multi=False):
if multi:
image = image * msk
print('image multiplied with msk')
msk_z = np.sum(msk, axis=2)
msk_y = np.sum(msk, axis=0)
img_binary = (msk_z > 0).astype(np.uint8)
g = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
img_open = cv2.morphologyEx(img_binary, cv2.MORPH_OPEN, g)
contours, hierarchy = cv2.findContours(img_open, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
area = 0
area_max = 0
minmax = []
for i, c in enumerate(contours):
# area[i] = cv2.contourArea(c)
# print('the area is %d'%area[i])
area = cv2.contourArea(c)
# if area_max < area:
# area_max = area
# c_max = c
x_min = min(c[:, :, 0])[0] - margin
x_max = max(c[:, :, 0])[0] + margin
y_min = min(c[:, :, 1])[0] - margin
y_max = max(c[:, :, 1])[0] + margin
if area > 10:
minmax.append([x_min, x_max, y_min, y_max])
# print(minmax)
x_min = min(np.array(minmax)[:, 0])
x_max = max(np.array(minmax)[:, 1])
y_min = min(np.array(minmax)[:, 2])
y_max = max(np.array(minmax)[:, 3])
msk_y = np.sum(msk, axis=0)
img_binary = (msk_y > 0).astype(np.uint8)
g = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
img_open = cv2.morphologyEx(img_binary, cv2.MORPH_OPEN, g)
contours, hierarchy = cv2.findContours(img_open, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
area = 0
area_max = 0
for i, c in enumerate(contours):
# area[i] = cv2.contourArea(c)
# print('the area is %d'%area[i])
area = cv2.contourArea(c)
if area_max < area:
area_max = area
c_max = c
z_min = min(c_max[:, :, 0])[0] + margin * ds
z_max = max(c_max[:, :, 0])[0] - margin * ds
print(x_min, x_max, y_min, y_max, z_min, z_max)
img_crop = image[y_min:y_max, x_min:x_max, z_min:z_max]
return img_crop
    def __call__(self, *args, **kwargs):
        """Load the NIfTI volume and its two masks, split the lesion mask into
        right/left halves, window-normalize the image, and return per-side
        cropped stacks.

        Returns:
            dict: {'Right': {...}, 'Left': {...}} where each entry holds the
            cropped 'data' / 'data_l' / 'data_s' windows, the 'mask', the
            slice count 'len' and the downsample factor 'ds'.
        """
        # img: CT volume; msk: lesion labels (1/2); mskw: whole-organ mask
        img = nib.load(os.path.join(self.config.img_pth, self.config.img_id))
        msk = nib.load(os.path.join(self.config.msk1_pth, self.config.img_id))
        mskw = nib.load(os.path.join(self.config.msk2_pth, self.config.img_id))
        img_affine = img.affine
        # slice thickness from the NIfTI header (pixdim[3] is the z spacing)
        sl = img.header['pixdim'][3]
        x, y, z = img.shape
        center_sl = z // 2
        msk_1 = np.zeros(msk.shape)
        msk_2 = np.zeros(msk.shape)
        msk_w = np.zeros(msk.shape)
        img_f = img.get_fdata()
        msk = msk.get_fdata()
        mskw = mskw.get_fdata()
        # one binary mask per label value
        msk_1[msk == 1] = 1
        msk_2[msk == 2] = 1
        msk_1 = self.msk_new(msk_1, mskw, task='R')  # upper part as R as 1
        msk_2 = self.msk_new(msk_2, mskw, task='L')  # lower part as L as 2
        # choose downsample factor from slice thickness (thicker -> less ds)
        if sl > 2.5:
            ds = 1
        elif sl > 1.25:
            ds = 2
        else:
            ds = 4
        print(x, y, z, sl, ds)
        # four intensity windowings of the same volume
        img_d = self.normalize_window(img_f, window='normal')
        img_l = self.normalize_window(img_f, window='lung')
        img_s = self.normalize_window(img_f, window='soft_tissue')
        img_b = self.normalize_window(img_f, window='bone')
        meta_data = {}
        msk_1d = img_d * msk_1
        msk_2d = img_d * msk_2
        # crop each windowed volume to the right-side mask extent
        img_1d = self.img_crop_3d(img_d, msk_1d, ds=ds, multi=True)
        img_1l = self.img_crop_3d(img_l, msk_1d, ds=ds, multi=True)
        img_1s = self.img_crop_3d(img_s, msk_1d, ds=ds, multi=True)
        x, y, z = img_1d.shape
        meta_data['Right'] = {}
        meta_data['Left'] = {}
        meta_data['Right'] = {'data': img_1d, 'data_l': img_1l, 'data_s': img_1s, 'mask': msk_1d, 'len': z, 'ds': ds}
        # same for the left side
        img_2d = self.img_crop_3d(img_d, msk_2d, ds=ds, multi=True)
        img_2l = self.img_crop_3d(img_l, msk_2d, ds=ds, multi=True)
        img_2s = self.img_crop_3d(img_s, msk_2d, ds=ds, multi=True)
        x, y, z = img_2d.shape
        meta_data['Left'] = {'data': img_2d, 'data_l': img_2l, 'data_s': img_2s, 'mask': msk_2d, 'len': z, 'ds': ds}
        return meta_data
class model_infer_config(object):
    """Inference settings assembled from the parsed command-line arguments."""

    def __init__(self, args):
        # fixed identifier for the trained model variant
        self.model_type = 'cbam_bsmp_wfocal_fulldata'
        # values forwarded verbatim from the argument namespace
        self.model_name = args.model_name
        self.model_path = args.model_path
        self.used_gpu = args.used_gpu
        # number of output classes of the classifier head
        self.num_classes = 3
class model_infer(object):
    """Wraps a 3-D ResNet-34 classifier for single-volume inference on GPU."""
    def __init__(self,config):
        self.config = config
        self.gpu = config.used_gpu
        self.num_classes = config.num_classes
        # fixed input geometry: 64 slices of 256x256, 4 channels
        self.len = 64
        self.imsz = 256
        # placeholder input until get_data() is called
        self.inputs_val = torch.rand(1, 4, self.len, self.imsz, self.imsz).cuda()
        self.get_model()
        # self.transform = transforms.ToTensor()
        self.transform = None
    def interp(self, img, con_len):
        """Resize a 3-D volume to (imsz, imsz, con_len) via F.interpolate."""
        img_t = torch.tensor(img)
        # add batch and channel dims required by F.interpolate
        img_t = img_t.unsqueeze(0).unsqueeze(0)
        img_r = F.interpolate(img_t, [self.imsz, self.imsz, con_len])
        img_r = np.array(img_r).squeeze()
        return img_r
    def get_data(self,data):
        """Build the 4-channel network input from one side's meta_data entry."""
        # self.input_val = torch.autograd.Variable(torch.rand(1,4,64,256,256)).cuda()
        # self.inputs_val = torch.rand(1, 4, 64, 256, 256).cuda()
        img = data['data']
        img_l = data['data_l']
        img_s = data['data_s']
        img = self.interp(img, self.len)
        img_l = self.interp(img_l, self.len)
        img_s = self.interp(img_s, self.len)
        # channels: normal window, lung window, soft-tissue window, normal again
        img_c = np.array([img, img_l, img_s, img])
        # (C, H, W, D) -> (C, D, H, W)
        image = torch.tensor(img_c.transpose(0, 3, 1, 2))
        if self.transform:
            image = self.transform(image)
        self.inputs_val = image.unsqueeze(dim=0)
    def get_model(self):
        """Construct the 3-D ResNet-34, load the checkpoint, move to GPU."""
        torch.manual_seed(1)
        torch.cuda.manual_seed(1)
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
        self.model = resnet34(sample_size=self.imsz, sample_duration=self.len, num_classes=self.num_classes)
        # wrap in DataParallel only when several GPUs are visible
        if torch.cuda.device_count() > 1:
            self.model = nn.DataParallel(self.model).cuda()
        checkpoint = torch.load(self.config.model_path + self.config.model_name)
        # print(key, checkpoint['epoch'])
        self.model.load_state_dict(checkpoint['state_dict'])
        self.model.cuda()
        self.model.eval()
    def __call__(self, *args, **kwargs):
        """Run the forward pass and return sigmoid scores as a numpy array."""
        outputs_val = self.model(self.inputs_val.float().cuda())
        outputs_val = torch.sigmoid(outputs_val)
        return outputs_val.detach().cpu().numpy()
if __name__ == "__main__":
    # CLI entry point: preprocess one volume, then run inference per side.
    args = parse_arg()
    img_config = data_preprocess_config(args=args)
    model_config = model_infer_config(args = args)
    # data_preprocess(...)() returns {'Right': {...}, 'Left': {...}}
    meta_data = data_preprocess(config=img_config)()
    infered_model = model_infer(config=model_config)
    for key in meta_data.keys():
        infered_model.get_data(meta_data[key])
        results = infered_model()
        # attach the sigmoid scores back onto the per-side record
        meta_data[key]['predict'] = results
        print(key,results)
"numpy.sum",
"argparse.ArgumentParser",
"numpy.clip",
"torch.cuda.device_count",
"numpy.mean",
"os.path.join",
"cv2.contourArea",
"skimage.filters.threshold_otsu",
"numpy.std",
"torch.load",
"cv2.morphologyEx",
"torch.manual_seed",
"torch.cuda.manual_seed",
"numpy.percentile",
"torch.ran... | [((695, 752), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""UCSD ImageClef2020"""'}), "(description='UCSD ImageClef2020')\n", (718, 752), False, 'import argparse\n'), ((2437, 2464), 'skimage.filters.threshold_otsu', 'threshold_otsu', (['image_array'], {}), '(image_array)\n', (2451, 2464), False, 'from skimage.filters import threshold_otsu, gaussian\n'), ((3011, 3040), 'numpy.std', 'np.std', (['image_array_fg_robust'], {}), '(image_array_fg_robust)\n', (3017, 3040), True, 'import numpy as np\n'), ((3057, 3087), 'numpy.mean', 'np.mean', (['image_array_fg_robust'], {}), '(image_array_fg_robust)\n', (3064, 3087), True, 'import numpy as np\n'), ((3424, 3470), 'numpy.clip', 'np.clip', (['image_array'], {'a_min': 'a_min', 'a_max': 'a_max'}), '(image_array, a_min=a_min, a_max=a_max)\n', (3431, 3470), True, 'import numpy as np\n'), ((3781, 3830), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(9, 9)'], {}), '(cv2.MORPH_RECT, (9, 9))\n', (3806, 3830), False, 'import cv2\n'), ((3850, 3897), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img_binary', 'cv2.MORPH_OPEN', 'g'], {}), '(img_binary, cv2.MORPH_OPEN, g)\n', (3866, 3897), False, 'import cv2\n'), ((3928, 3992), 'cv2.findContours', 'cv2.findContours', (['img_open', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(img_open, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (3944, 3992), False, 'import cv2\n'), ((4606, 4625), 'numpy.zeros', 'np.zeros', (['[x, y, z]'], {}), '([x, y, z])\n', (4614, 4625), True, 'import numpy as np\n'), ((5107, 5126), 'numpy.sum', 'np.sum', (['msk'], {'axis': '(2)'}), '(msk, axis=2)\n', (5113, 5126), True, 'import numpy as np\n'), ((5143, 5162), 'numpy.sum', 'np.sum', (['msk'], {'axis': '(0)'}), '(msk, axis=0)\n', (5149, 5162), True, 'import numpy as np\n'), ((5226, 5275), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(9, 9)'], {}), '(cv2.MORPH_RECT, (9, 9))\n', (5251, 5275), False, 'import 
cv2\n'), ((5295, 5342), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img_binary', 'cv2.MORPH_OPEN', 'g'], {}), '(img_binary, cv2.MORPH_OPEN, g)\n', (5311, 5342), False, 'import cv2\n'), ((5373, 5437), 'cv2.findContours', 'cv2.findContours', (['img_open', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(img_open, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (5389, 5437), False, 'import cv2\n'), ((6302, 6321), 'numpy.sum', 'np.sum', (['msk'], {'axis': '(0)'}), '(msk, axis=0)\n', (6308, 6321), True, 'import numpy as np\n'), ((6385, 6434), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(9, 9)'], {}), '(cv2.MORPH_RECT, (9, 9))\n', (6410, 6434), False, 'import cv2\n'), ((6454, 6501), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img_binary', 'cv2.MORPH_OPEN', 'g'], {}), '(img_binary, cv2.MORPH_OPEN, g)\n', (6470, 6501), False, 'import cv2\n'), ((6532, 6596), 'cv2.findContours', 'cv2.findContours', (['img_open', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(img_open, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (6548, 6596), False, 'import cv2\n'), ((7587, 7606), 'numpy.zeros', 'np.zeros', (['msk.shape'], {}), '(msk.shape)\n', (7595, 7606), True, 'import numpy as np\n'), ((7623, 7642), 'numpy.zeros', 'np.zeros', (['msk.shape'], {}), '(msk.shape)\n', (7631, 7642), True, 'import numpy as np\n'), ((7659, 7678), 'numpy.zeros', 'np.zeros', (['msk.shape'], {}), '(msk.shape)\n', (7667, 7678), True, 'import numpy as np\n'), ((9993, 10010), 'torch.tensor', 'torch.tensor', (['img'], {}), '(img)\n', (10005, 10010), False, 'import torch\n'), ((10076, 10129), 'torch.nn.functional.interpolate', 'F.interpolate', (['img_t', '[self.imsz, self.imsz, con_len]'], {}), '(img_t, [self.imsz, self.imsz, con_len])\n', (10089, 10129), True, 'import torch.nn.functional as F\n'), ((10613, 10647), 'numpy.array', 'np.array', (['[img, img_l, img_s, img]'], {}), '([img, img_l, img_s, img])\n', (10621, 10647), True, 'import numpy as np\n'), ((10863, 10883), 'torch.manual_seed', 
'torch.manual_seed', (['(1)'], {}), '(1)\n', (10880, 10883), False, 'import torch\n'), ((10892, 10917), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1)'], {}), '(1)\n', (10914, 10917), False, 'import torch\n'), ((11206, 11265), 'torch.load', 'torch.load', (['(self.config.model_path + self.config.model_name)'], {}), '(self.config.model_path + self.config.model_name)\n', (11216, 11265), False, 'import torch\n'), ((11550, 11576), 'torch.sigmoid', 'torch.sigmoid', (['outputs_val'], {}), '(outputs_val)\n', (11563, 11576), False, 'import torch\n'), ((2709, 2741), 'numpy.percentile', 'np.percentile', (['image_array_fg', '(5)'], {}), '(image_array_fg, 5)\n', (2722, 2741), True, 'import numpy as np\n'), ((2764, 2797), 'numpy.percentile', 'np.percentile', (['image_array_fg', '(95)'], {}), '(image_array_fg, 95)\n', (2777, 2797), True, 'import numpy as np\n'), ((4091, 4109), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (4106, 4109), False, 'import cv2\n'), ((5662, 5680), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (5677, 5680), False, 'import cv2\n'), ((6801, 6819), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (6816, 6819), False, 'import cv2\n'), ((7230, 7283), 'os.path.join', 'os.path.join', (['self.config.img_pth', 'self.config.img_id'], {}), '(self.config.img_pth, self.config.img_id)\n', (7242, 7283), False, 'import os\n'), ((7308, 7362), 'os.path.join', 'os.path.join', (['self.config.msk1_pth', 'self.config.img_id'], {}), '(self.config.msk1_pth, self.config.img_id)\n', (7320, 7362), False, 'import os\n'), ((7388, 7442), 'os.path.join', 'os.path.join', (['self.config.msk2_pth', 'self.config.img_id'], {}), '(self.config.msk2_pth, self.config.img_id)\n', (7400, 7442), False, 'import os\n'), ((11093, 11118), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (11116, 11118), False, 'import torch\n'), ((6129, 6145), 'numpy.array', 'np.array', (['minmax'], {}), '(minmax)\n', (6137, 6145), True, 'import 
numpy as np\n'), ((6173, 6189), 'numpy.array', 'np.array', (['minmax'], {}), '(minmax)\n', (6181, 6189), True, 'import numpy as np\n'), ((6217, 6233), 'numpy.array', 'np.array', (['minmax'], {}), '(minmax)\n', (6225, 6233), True, 'import numpy as np\n'), ((6261, 6277), 'numpy.array', 'np.array', (['minmax'], {}), '(minmax)\n', (6269, 6277), True, 'import numpy as np\n'), ((9780, 9828), 'torch.rand', 'torch.rand', (['(1)', '(4)', 'self.len', 'self.imsz', 'self.imsz'], {}), '(1, 4, self.len, self.imsz, self.imsz)\n', (9790, 9828), False, 'import torch\n'), ((10146, 10161), 'numpy.array', 'np.array', (['img_r'], {}), '(img_r)\n', (10154, 10161), True, 'import numpy as np\n'), ((11149, 11176), 'torch.nn.DataParallel', 'nn.DataParallel', (['self.model'], {}), '(self.model)\n', (11164, 11176), False, 'from torch import nn\n')] |
"""a series of useful pytorch operations related to bbox transformation."""
import torch
import numpy as np
def torch_to_np_dtype(ttype):
    """Map a torch dtype to the equivalent numpy dtype.

    Args:
        ttype: a ``torch.dtype`` such as ``torch.float32``.

    Returns:
        np.dtype: the matching numpy dtype.

    Raises:
        KeyError: if ``ttype`` has no registered mapping.
    """
    type_map = {
        torch.float16: np.dtype(np.float16),
        torch.float32: np.dtype(np.float32),
        # BUG fix: this key used to be ``torch.float16`` a second time, which
        # silently remapped float16 to float64 and made float64 unmappable.
        torch.float64: np.dtype(np.float64),
        torch.int32: np.dtype(np.int32),
        torch.int64: np.dtype(np.int64),
        torch.uint8: np.dtype(np.uint8),
    }
    return type_map[ttype]
def corners_nd(dims, origin=0.5):
    """generate relative box corners based on length per dim and
    origin point.

    Args:
        dims (float tensor, shape=[N, ndim]): array of length per dim
        origin (list or array or float): origin point relative to the
            smallest point; a scalar is broadcast to all dims.

    Returns:
        float tensor, shape=[N, 2 ** ndim, ndim]: corner offsets.
        point layout example: (2d) x0y0, x0y1, x1y0, x1y1;
        (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1
        where x0 < x1, y0 < y1, z0 < z1

    Raises:
        ValueError: if ndim is neither 2 nor 3.
    """
    ndim = int(dims.shape[1])
    dtype = torch_to_np_dtype(dims.dtype)
    if isinstance(origin, float):
        origin = [origin] * ndim
    # Enumerate all 2**ndim 0/1 corner combinations, one row per corner.
    corners_norm = np.stack(
        np.unravel_index(np.arange(2 ** ndim), [2] * ndim), axis=1).astype(dtype)
    # Reorder so 2d corners run clockwise from the minimum point; for 3d the
    # bottom face comes first, then the top face, each in the same order.
    if ndim == 2:
        corners_norm = corners_norm[[0, 1, 3, 2]]
    elif ndim == 3:
        corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
    else:
        # typo fixed: the message previously read "shoule"
        raise ValueError('ndim should be 2 or 3')
    corners_norm = corners_norm - np.array(origin, dtype=dtype)
    corners_norm = torch.from_numpy(corners_norm).type_as(dims)
    # Scale the unit corner offsets by each box's per-dim extents.
    corners = dims.view(-1, 1, ndim) * corners_norm.view(1, 2 ** ndim, ndim)
    return corners
def rotation_3d_in_axis(points, angles, axis=0):
    """Rotate batched point sets around one coordinate axis.

    Args:
        points: [N, point_size, 3] tensor of points.
        angles: [N] tensor of rotation angles (radians).
        axis: axis index to rotate around (0, 1, or 2 / -1).

    Returns:
        [N, point_size, 3] rotated points.

    Raises:
        ValueError: for any other axis value.
    """
    sin_a = torch.sin(angles)
    cos_a = torch.cos(angles)
    one = torch.ones_like(cos_a)
    zero = torch.zeros_like(cos_a)
    # Per-axis rotation matrix rows; each entry is a length-N tensor.
    if axis == 1:
        rows = [[cos_a, zero, -sin_a],
                [zero, one, zero],
                [sin_a, zero, cos_a]]
    elif axis == 2 or axis == -1:
        rows = [[cos_a, -sin_a, zero],
                [sin_a, cos_a, zero],
                [zero, zero, one]]
    elif axis == 0:
        rows = [[zero, cos_a, -sin_a],
                [zero, sin_a, cos_a],
                [one, zero, zero]]
    else:
        raise ValueError("axis should in range")
    # rot_mat_T has shape [3, 3, N]; contract the point coordinate with it.
    rot_mat_T = torch.stack([torch.stack(row) for row in rows])
    return torch.einsum('aij,jka->aik', (points, rot_mat_T))
def center_to_corner_box3d(centers,
                           dims,
                           angles,
                           origin=(0.5, 1.0, 0.5),
                           axis=1):
    """convert kitti locations, dimensions and angles to corners

    Args:
        centers (float tensor, shape=[N, 3]): locations in kitti label file.
        dims (float tensor, shape=[N, 3]): dimensions in kitti label file.
        angles (float tensor, shape=[N]): rotation_y in kitti label file.
        origin (sequence or float): origin point relative to smallest point.
            use (0.5, 1.0, 0.5) in camera and (0.5, 0.5, 0) in lidar.
        axis (int): rotation axis. 1 for camera and 2 for lidar.

    Returns:
        float tensor, shape=[N, 8, 3]: the box corners.
    """
    # 'length' in kitti format is in x axis.
    # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)
    # center in kitti format is [0.5, 1.0, 0.5] in xyz.
    # BUG fix: the default origin was a mutable list; a tuple default avoids
    # accidental cross-call mutation (the value is only read, so callers see
    # identical behavior).
    corners = corners_nd(dims, origin=origin)
    # corners: [N, 8, 3] -- rotate around the chosen axis, then translate.
    corners = rotation_3d_in_axis(corners, angles, axis=axis)
    corners += centers.view(-1, 1, 3)
    return corners
def lidar_to_camera(points, r_rect, velo2cam):
    """Project lidar-frame points into the camera frame.

    :param points: [N, 3] or [B, N, 3] point coordinates
    :param r_rect: rectification matrix (same rank as points)
    :param velo2cam: velo-to-camera matrix (same rank as points)
    :return: points in camera coordinates, [..., 3]
    """
    assert len(points.shape) == 2 or len(points.shape) == 3
    assert len(points.shape) == len(r_rect.shape)
    assert len(points.shape) == len(velo2cam.shape)
    # homogeneous coordinates: append a column of ones
    ones = torch.ones_like(points[..., 0:1])
    homog = torch.cat([points, ones], dim=-1)
    cam = homog @ (r_rect @ velo2cam).t()
    return cam[..., :3]
def pseudo_lidar_to_camera(points):
    """Map pseudo-lidar points into the camera frame via a fixed axis swap:
    cam x = -velo y, cam y = -velo z, cam z = velo x (identity rectification).

    :param points: [..., 3] tensor of points
    :return: tensor of the same shape in camera coordinates
    """
    assert points.shape[-1] == 3
    lead_shape = points.shape[:-1]
    rect = torch.eye(4, dtype=torch.float32, device=points.device)
    axis_swap = torch.tensor(
        [[0, -1, 0, 0],   # cam x: -velo y
         [0, 0, -1, 0],   # cam y: -velo z
         [1, 0, 0, 0],    # cam z: velo x
         [0, 0, 0, 1]], dtype=torch.float32, device=points.device)
    flat = points.view(-1, 3)
    return lidar_to_camera(flat, rect, axis_swap).view(*lead_shape, 3)
def camera_to_lidar(points, r_rect, velo2cam):
    """transform points in camera coordinate to lidar coordinate

    :param points: [B, ..., 3] points in camera coordinate
    :param r_rect: camera rectification transformation matrix
    :param velo2cam: velo to cam transformation matrix
    :return: [B, ..., 3] points in LIDAR coordinate
    """
    # homogeneous coordinates, then flatten everything between batch and coord
    homog = torch.cat([points, torch.ones_like(points[..., 0:1])], dim=-1)
    assert homog.shape[-1] == 4
    batch = homog.shape[0]
    mid_shape = homog.shape[1:-1]
    flat = homog.view(batch, -1, 4)
    # inverse of the forward (rect @ velo2cam) transform, applied row-wise
    inv_t = torch.inverse(r_rect @ velo2cam).transpose(-2, -1)
    lidar = (flat @ inv_t).view(batch, *mid_shape, 4)
    return lidar[..., :3]
def box_lidar_to_camera(data, r_rect, velo2cam):
    """Convert lidar boxes [x, y, z, w, l, h, r] to camera-frame boxes
    [x, y, z, l, h, w, r] (center projected, size axes reordered)."""
    center = data[..., 0:3]
    w = data[..., 3:4]
    l = data[..., 4:5]
    h = data[..., 5:6]
    yaw = data[..., 6:7]
    cam_center = lidar_to_camera(center, r_rect, velo2cam)
    return torch.cat([cam_center, l, h, w, yaw], dim=-1)
def box_pseudo_lidar_to_camera(data):
    """Convert pseudo-lidar boxes [x, y, z, w, l, h, r] to camera-frame boxes
    [x, y, z, l, h, w, r] using the fixed pseudo-lidar axis swap."""
    center = data[..., 0:3]
    w = data[..., 3:4]
    l = data[..., 4:5]
    h = data[..., 5:6]
    yaw = data[..., 6:7]
    cam_center = pseudo_lidar_to_camera(center)
    return torch.cat([cam_center, l, h, w, yaw], dim=-1)
def project_to_image(points_3d, proj_mat):
    """Apply a projection matrix to 3-D points and perspective-divide to 2-D.

    NOTE(review): the homogeneous coordinate is padded with ZEROS (not ones),
    so the projection's translation column is ignored -- confirm against the
    caller before changing.

    :param points_3d: [..., 3] tensor of points
    :param proj_mat: projection matrix (applied transposed on the right)
    :return: [..., 2] projected pixel coordinates
    """
    lead = list(points_3d.shape)[:-1]
    pad_shape = np.concatenate([lead, [1]], axis=0).tolist()
    pad = torch.zeros(*pad_shape).type_as(points_3d)
    homog = torch.cat([points_3d, pad], dim=-1)
    # point_2d = points_4 @ proj_mat^T
    projected = torch.matmul(homog, proj_mat.t())
    # perspective divide by the depth component
    return projected[..., :2] / projected[..., 2:3]
| [
"torch.ones_like",
"torch.eye",
"numpy.concatenate",
"torch.zeros_like",
"torch.stack",
"numpy.dtype",
"torch.cat",
"torch.cos",
"torch.einsum",
"numpy.array",
"numpy.arange",
"torch.zeros",
"torch.inverse",
"torch.sin",
"torch.tensor",
"torch.from_numpy"
] | [((2240, 2257), 'torch.sin', 'torch.sin', (['angles'], {}), '(angles)\n', (2249, 2257), False, 'import torch\n'), ((2272, 2289), 'torch.cos', 'torch.cos', (['angles'], {}), '(angles)\n', (2281, 2289), False, 'import torch\n'), ((2301, 2325), 'torch.ones_like', 'torch.ones_like', (['rot_cos'], {}), '(rot_cos)\n', (2316, 2325), False, 'import torch\n'), ((2338, 2363), 'torch.zeros_like', 'torch.zeros_like', (['rot_cos'], {}), '(rot_cos)\n', (2354, 2363), False, 'import torch\n'), ((3140, 3189), 'torch.einsum', 'torch.einsum', (['"""aij,jka->aik"""', '(points, rot_mat_T)'], {}), "('aij,jka->aik', (points, rot_mat_T))\n", (3152, 3189), False, 'import torch\n'), ((4983, 5038), 'torch.eye', 'torch.eye', (['(4)'], {'dtype': 'torch.float32', 'device': 'points.device'}), '(4, dtype=torch.float32, device=points.device)\n', (4992, 5038), False, 'import torch\n'), ((5054, 5173), 'torch.tensor', 'torch.tensor', (['[[0, -1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]'], {'dtype': 'torch.float32', 'device': 'points.device'}), '([[0, -1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]],\n dtype=torch.float32, device=points.device)\n', (5066, 5173), False, 'import torch\n'), ((6416, 6452), 'torch.cat', 'torch.cat', (['[xyz, l, h, w, r]'], {'dim': '(-1)'}), '([xyz, l, h, w, r], dim=-1)\n', (6425, 6452), False, 'import torch\n'), ((6663, 6699), 'torch.cat', 'torch.cat', (['[xyz, l, h, w, r]'], {'dim': '(-1)'}), '([xyz, l, h, w, r], dim=-1)\n', (6672, 6699), False, 'import torch\n'), ((181, 201), 'numpy.dtype', 'np.dtype', (['np.float16'], {}), '(np.float16)\n', (189, 201), True, 'import numpy as np\n'), ((226, 246), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (234, 246), True, 'import numpy as np\n'), ((271, 291), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (279, 291), True, 'import numpy as np\n'), ((314, 332), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (322, 332), True, 'import numpy as np\n'), ((355, 
373), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (363, 373), True, 'import numpy as np\n'), ((396, 414), 'numpy.dtype', 'np.dtype', (['np.uint8'], {}), '(np.uint8)\n', (404, 414), True, 'import numpy as np\n'), ((1934, 1963), 'numpy.array', 'np.array', (['origin'], {'dtype': 'dtype'}), '(origin, dtype=dtype)\n', (1942, 1963), True, 'import numpy as np\n'), ((1983, 2013), 'torch.from_numpy', 'torch.from_numpy', (['corners_norm'], {}), '(corners_norm)\n', (1999, 2013), False, 'import torch\n'), ((4731, 4764), 'torch.ones_like', 'torch.ones_like', (['points[..., 0:1]'], {}), '(points[..., 0:1])\n', (4746, 4764), False, 'import torch\n'), ((5805, 5838), 'torch.ones_like', 'torch.ones_like', (['points[..., 0:1]'], {}), '(points[..., 0:1])\n', (5820, 5838), False, 'import torch\n'), ((6808, 6849), 'numpy.concatenate', 'np.concatenate', (['[points_num, [1]]'], {'axis': '(0)'}), '([points_num, [1]], axis=0)\n', (6822, 6849), True, 'import numpy as np\n'), ((2428, 2467), 'torch.stack', 'torch.stack', (['[rot_cos, zeros, -rot_sin]'], {}), '([rot_cos, zeros, -rot_sin])\n', (2439, 2467), False, 'import torch\n'), ((2481, 2514), 'torch.stack', 'torch.stack', (['[zeros, ones, zeros]'], {}), '([zeros, ones, zeros])\n', (2492, 2514), False, 'import torch\n'), ((2528, 2566), 'torch.stack', 'torch.stack', (['[rot_sin, zeros, rot_cos]'], {}), '([rot_sin, zeros, rot_cos])\n', (2539, 2566), False, 'import torch\n'), ((6029, 6061), 'torch.inverse', 'torch.inverse', (['(r_rect @ velo2cam)'], {}), '(r_rect @ velo2cam)\n', (6042, 6061), False, 'import torch\n'), ((1259, 1279), 'numpy.arange', 'np.arange', (['(2 ** ndim)'], {}), '(2 ** ndim)\n', (1268, 1279), True, 'import numpy as np\n'), ((2658, 2697), 'torch.stack', 'torch.stack', (['[rot_cos, -rot_sin, zeros]'], {}), '([rot_cos, -rot_sin, zeros])\n', (2669, 2697), False, 'import torch\n'), ((2711, 2749), 'torch.stack', 'torch.stack', (['[rot_sin, rot_cos, zeros]'], {}), '([rot_sin, rot_cos, zeros])\n', (2722, 2749), 
False, 'import torch\n'), ((2763, 2796), 'torch.stack', 'torch.stack', (['[zeros, zeros, ones]'], {}), '([zeros, zeros, ones])\n', (2774, 2796), False, 'import torch\n'), ((6905, 6931), 'torch.zeros', 'torch.zeros', (['*points_shape'], {}), '(*points_shape)\n', (6916, 6931), False, 'import torch\n'), ((2919, 2958), 'torch.stack', 'torch.stack', (['[zeros, rot_cos, -rot_sin]'], {}), '([zeros, rot_cos, -rot_sin])\n', (2930, 2958), False, 'import torch\n'), ((2972, 3010), 'torch.stack', 'torch.stack', (['[zeros, rot_sin, rot_cos]'], {}), '([zeros, rot_sin, rot_cos])\n', (2983, 3010), False, 'import torch\n'), ((3024, 3057), 'torch.stack', 'torch.stack', (['[ones, zeros, zeros]'], {}), '([ones, zeros, zeros])\n', (3035, 3057), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import base64
import cv2
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
import multiprocessing
import numpy as np
import os
# logging: route this module's logs to a NullHandler (the host app decides
# the real handlers) and silence the chatty server-framework loggers.
from logging import getLogger, NullHandler, CRITICAL
logger = getLogger(__name__)
logger.addHandler(NullHandler())
# disable werkzeug logger
werkzeug_logger = getLogger('werkzeug')
werkzeug_logger.setLevel(CRITICAL)
# disable engineio logger
engineio_logger = getLogger('engineio')
engineio_logger.setLevel(CRITICAL)
# disable socketio logger
socketio_logger = getLogger('socketio')
socketio_logger.setLevel(CRITICAL)
# Socket.IO namespace shared by server and browser client
IO_NAMESPACE = '/liveuploader'
# async backend used by Flask-SocketIO
ASYNC_MODE = 'eventlet'
def decodeimg(img):
    '''Decode a base64 data-URL string ("data:image/...;base64,...") into a
    BGR image array; return None on any failure.'''
    try:
        # strip the "data:image/...;base64," prefix (everything up to the comma)
        img = img[img.find(',') + 1:]
        # base64.decodestring was removed in Python 3.9; decodebytes is the
        # supported equivalent.
        img = base64.decodebytes(img.encode('ascii'))
        # np.fromstring on binary data is deprecated; frombuffer is the
        # zero-copy replacement.
        img = np.frombuffer(img, dtype=np.uint8)
        img = cv2.imdecode(img, 1)
        return img
    except Exception:
        logger.error('Failed to decodeimg()')
        return None
def encodeimg(img, ext='.jpeg'):
    '''Encode an image array into a base64 data-URL string; return None on
    failure.'''
    try:
        ret, img = cv2.imencode(ext, img)
        if not ret:
            # raise explicitly so the except below logs the failure
            # (was a bare `raise`, which relied on an implicit RuntimeError)
            raise ValueError('cv2.imencode() failed')
        # ndarray.tostring / base64.encodestring are deprecated aliases;
        # encodestring was removed in Python 3.9.
        img = img.tobytes()
        img = base64.encodebytes(img)
        img = 'data:image/jpeg;base64,' + img.decode('ascii')
        return img
    except Exception:
        logger.error('Failed to encodeimg()')
        return None
def encodeImgElement(data, key):
    '''Replace data[key] (an image array) with its base64 string, in place.

    A missing key is only logged; an encode failure is logged and the key
    is removed.  Never raises.
    '''
    try:
        img = encodeimg(data[key])
        if img is None:
            raise Exception()
        data[key] = img
    except KeyError:
        logger.error('No image data (key: %s)' % key)
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        logger.error('Invalid image data (key: %s)' % key)
        # drop the bad entry; no-op if it was never there
        data.pop(key, None)
def rotateImg(img, deg):
    '''Rotate an image by `deg` degrees around its center; the output keeps
    the original width and height.'''
    height, width = img.shape[:2]
    matrix = cv2.getRotationMatrix2D((width / 2, height / 2), deg, 1.0)
    return cv2.warpAffine(img, matrix, (width, height))
def rotateImgElement(data, key, deg):
    '''Rotate data[key] (an image array) by `deg` degrees, in place.

    A missing key is only logged; an invalid image is logged and the key
    is removed.  Never raises.
    '''
    try:
        img = rotateImg(data[key], deg)
        if img is None:
            raise Exception()
        data[key] = img
    except KeyError:
        logger.error('No image data (key: %s)' % key)
    except Exception:
        # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        logger.error('Invalid image data (key: %s)' % key)
        # drop the bad entry; no-op if it was never there
        data.pop(key, None)
def new_server(request_queue, response_queue, stop_page, port, secret_key):
    """Build and run the Flask-SocketIO live-uploader server (blocking).

    Args:
        request_queue: queue that receives each decoded uploaded image
            (np.ndarray), or None to discard uploads.
        response_queue: queue polled for a response dict per upload
            (may contain 'img' and other fields), or None for no response.
        stop_page (bool): expose a "/stop" route that shuts the server down.
        port (int): TCP port to listen on.
        secret_key: Flask session secret.
    """
    # create server
    app = Flask(__name__, static_url_path='/static')
    app.config['SECRET_KEY'] = secret_key
    socketio = SocketIO(app, async_mode=ASYNC_MODE,
                        logger=False, engineio_logger=False)

    # rooting
    @app.route('/')
    def __index():
        logger.info('Render uploader page')
        return render_template('index.html', script="index.js")

    if stop_page:
        @app.route('/stop')
        def __stop():
            socketio.stop()
            logger.info('Server stop request')
            return 'This server is stopped'

    @socketio.on('connect', namespace=IO_NAMESPACE)
    def __on_upload_connect():
        logger.info('New live uploader connection is established')

    @socketio.on('disconnect', namespace=IO_NAMESPACE)
    def __on_upload_disconnect():
        logger.info('Live uploader connection is closed')

    @socketio.on('upload_img', namespace=IO_NAMESPACE)
    def __on_upload_image(data):
        # one uploaded frame: decode, forward to request_queue, optionally
        # wait for a processed response and emit it back to the browser
        logger.debug('New image is received')
        # check need to output
        if request_queue is None:
            return
        # decode from jpeg base64 string
        try:
            img = data['img']
        except KeyError:
            logger.error('Invalid data type')
            return
        img = decodeimg(img)
        if img is None:
            return
        # Rotate 180
        if 'rotate' in data and data['rotate']:
            img = rotateImg(img, 180)
        # put into output queue
        request_queue.put(img)
        # emit response
        if response_queue is not None:
            # wait for response (blocks until the consumer replies)
            resp_data = response_queue.get()
            # Rotate 180
            if 'rotate' in data and data['rotate']:
                rotateImgElement(resp_data, key='img', deg=180)
            # encode image
            encodeImgElement(resp_data, key='img')
            # emit
            logger.debug('Emit response')
            emit('response', resp_data, namespace=IO_NAMESPACE)

    # start server (blocks until socketio.stop() is called)
    logger.info('Start server on port %d' % port)
    socketio.run(app, host='0.0.0.0', port=port, debug=False, log_output=False)
    logger.info('Stop server on port %d' % port)
def start(request_queue, response_queue=None, stop_page=True, port=5000,
          secret_key=None):
    '''Start new image uploading server on `port`.
    This function create new daemon process and start it.

    arguments:
        * request_queue (multiprocessing.Queue): output queue.
                                                 It returns a image (np.ndarray).
        * response_queue (multiprocessing.Queue): input queue.
                                                  The input type is dict and it can contain
                                                  'img': (np.ndarray), 'msg': (str).
        * stop_page (bool): enable server stop page "/stop".
        * port (int): server port
        * secret_key (bytes): Flask session secret; a fresh random key is
                              generated per call when omitted.

    If there are no need to use IO, set corresponding queues to `None`.
    '''
    # BUG fix: `secret_key=os.urandom(24)` as a default was evaluated once at
    # import time, so every server shared the same key; generate it per call.
    if secret_key is None:
        secret_key = os.urandom(24)
    process = multiprocessing.Process(target=new_server,
                                      args=(request_queue, response_queue,
                                            stop_page, port, secret_key))
    # daemonize so the server dies with the parent process
    process.daemon = True
    process.start()
| [
"flask.Flask",
"cv2.imdecode",
"cv2.warpAffine",
"numpy.fromstring",
"cv2.imencode",
"flask_socketio.emit",
"logging.NullHandler",
"flask_socketio.SocketIO",
"base64.encodestring",
"flask.render_template",
"multiprocessing.Process",
"os.urandom",
"cv2.getRotationMatrix2D",
"logging.getLogg... | [((258, 277), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (267, 277), False, 'from logging import getLogger, NullHandler, CRITICAL\n'), ((356, 377), 'logging.getLogger', 'getLogger', (['"""werkzeug"""'], {}), "('werkzeug')\n", (365, 377), False, 'from logging import getLogger, NullHandler, CRITICAL\n'), ((457, 478), 'logging.getLogger', 'getLogger', (['"""engineio"""'], {}), "('engineio')\n", (466, 478), False, 'from logging import getLogger, NullHandler, CRITICAL\n'), ((558, 579), 'logging.getLogger', 'getLogger', (['"""socketio"""'], {}), "('socketio')\n", (567, 579), False, 'from logging import getLogger, NullHandler, CRITICAL\n'), ((296, 309), 'logging.NullHandler', 'NullHandler', ([], {}), '()\n', (307, 309), False, 'from logging import getLogger, NullHandler, CRITICAL\n'), ((1832, 1881), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(w / 2, h / 2)', 'deg', '(1.0)'], {}), '((w / 2, h / 2), deg, 1.0)\n', (1855, 1881), False, 'import cv2\n'), ((1900, 1930), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(w, h)'], {}), '(img, M, (w, h))\n', (1914, 1930), False, 'import cv2\n'), ((2445, 2487), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '"""/static"""'}), "(__name__, static_url_path='/static')\n", (2450, 2487), False, 'from flask import Flask, render_template\n'), ((2545, 2618), 'flask_socketio.SocketIO', 'SocketIO', (['app'], {'async_mode': 'ASYNC_MODE', 'logger': '(False)', 'engineio_logger': '(False)'}), '(app, async_mode=ASYNC_MODE, logger=False, engineio_logger=False)\n', (2553, 2618), False, 'from flask_socketio import SocketIO, emit\n'), ((4709, 4723), 'os.urandom', 'os.urandom', (['(24)'], {}), '(24)\n', (4719, 4723), False, 'import os\n'), ((5313, 5426), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'new_server', 'args': '(request_queue, response_queue, stop_page, port, secret_key)'}), '(target=new_server, args=(request_queue,\n response_queue, stop_page, 
port, secret_key))\n', (5336, 5426), False, 'import multiprocessing\n'), ((860, 894), 'numpy.fromstring', 'np.fromstring', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (873, 894), True, 'import numpy as np\n'), ((909, 929), 'cv2.imdecode', 'cv2.imdecode', (['img', '(1)'], {}), '(img, 1)\n', (921, 929), False, 'import cv2\n'), ((1100, 1122), 'cv2.imencode', 'cv2.imencode', (['ext', 'img'], {}), '(ext, img)\n', (1112, 1122), False, 'import cv2\n'), ((1204, 1228), 'base64.encodestring', 'base64.encodestring', (['img'], {}), '(img)\n', (1223, 1228), False, 'import base64\n'), ((2756, 2804), 'flask.render_template', 'render_template', (['"""index.html"""'], {'script': '"""index.js"""'}), "('index.html', script='index.js')\n", (2771, 2804), False, 'from flask import Flask, render_template\n'), ((4362, 4413), 'flask_socketio.emit', 'emit', (['"""response"""', 'resp_data'], {'namespace': 'IO_NAMESPACE'}), "('response', resp_data, namespace=IO_NAMESPACE)\n", (4366, 4413), False, 'from flask_socketio import SocketIO, emit\n')] |
from abc import ABCMeta
from abc import abstractmethod
import numpy as np
import torch
from disprcnn.modeling.sassd_module.core.bbox3d.geometry import center_to_corner_box3d
def second_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """Encode lidar boxes [x, y, z, w, l, h, r] as residuals w.r.t. anchors.

    Args:
        boxes ([N, 7] array): ground-truth boxes (z is bottom-centered).
        anchors ([N, 7] array): matched anchors in the same layout.
        encode_angle_to_vector: encode the angle as (cos, sin) residuals;
            improves AOS at some cost elsewhere.
        smooth_dim: use (g/a - 1) instead of log(g/a) for the sizes.

    Returns:
        [N, 7] (or [N, 8] with the angle vector) encoded residuals.
    """
    xa, ya, za, wa, la, ha, ra = np.split(anchors, 7, axis=-1)
    xg, yg, zg, wg, lg, hg, rg = np.split(boxes, 7, axis=-1)
    # shift both z values from bottom-center to volume center
    zg = zg + hg / 2
    za = za + ha / 2
    diag = np.sqrt(la ** 2 + wa ** 2)
    xt = (xg - xa) / diag
    yt = (yg - ya) / diag
    zt = (zg - za) / ha
    if smooth_dim:
        wt, lt, ht = wg / wa - 1, lg / la - 1, hg / ha - 1
    else:
        wt, lt, ht = np.log(wg / wa), np.log(lg / la), np.log(hg / ha)
    if not encode_angle_to_vector:
        return np.concatenate([xt, yt, zt, wt, lt, ht, rg - ra], axis=-1)
    rtx = np.cos(rg) - np.cos(ra)
    rty = np.sin(rg) - np.sin(ra)
    return np.concatenate([xt, yt, zt, wt, lt, ht, rtx, rty], axis=-1)
def second_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """Decode lidar box residuals back to absolute [x, y, z, w, l, h, r]
    boxes (inverse of ``second_box_encode``).

    Args:
        box_encodings ([N, 7] or [N, 8] tensor): encoded residuals.
        anchors ([N, 7] tensor): the anchors the residuals refer to.
        encode_angle_to_vector: residuals carry (cos, sin) angle components.
        smooth_dim: sizes were encoded as (g/a - 1) instead of log(g/a).
    """
    xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
    if encode_angle_to_vector:
        xt, yt, zt, wt, lt, ht, rtx, rty = torch.split(box_encodings, 1, dim=-1)
    else:
        xt, yt, zt, wt, lt, ht, rt = torch.split(box_encodings, 1, dim=-1)
    # anchor z: bottom-center -> volume center
    za = za + ha / 2
    diag = torch.sqrt(la ** 2 + wa ** 2)
    xg = xt * diag + xa
    yg = yt * diag + ya
    zg = zt * ha + za
    if smooth_dim:
        lg, wg, hg = (lt + 1) * la, (wt + 1) * wa, (ht + 1) * ha
    else:
        lg, wg, hg = torch.exp(lt) * la, torch.exp(wt) * wa, torch.exp(ht) * ha
    if encode_angle_to_vector:
        rg = torch.atan2(rty + torch.sin(ra), rtx + torch.cos(ra))
    else:
        rg = rt + ra
    # back to bottom-centered z
    zg = zg - hg / 2
    return torch.cat([xg, yg, zg, wg, lg, hg, rg], dim=-1)
def bev_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """Bird's-eye-view box encode for VoxelNet in lidar.

    Args:
        boxes ([N, 5] array): x, y, w, l, r
        anchors ([N, 5] array): anchors in the same layout.
        encode_angle_to_vector: bool. Encode the yaw residual as a
            (cos, sin) pair; increases aos performance, decreases
            other performance.
        smooth_dim: bool. Use linear ratio targets instead of log-ratio
            targets for the box dimensions.
    """
    xa, ya, wa, la, ra = np.split(anchors, 5, axis=-1)
    xg, yg, wg, lg, rg = np.split(boxes, 5, axis=-1)
    diag = np.sqrt(la ** 2 + wa ** 2)
    xt = (xg - xa) / diag
    yt = (yg - ya) / diag
    if smooth_dim:
        wt, lt = wg / wa - 1, lg / la - 1
    else:
        wt, lt = np.log(wg / wa), np.log(lg / la)
    if encode_angle_to_vector:
        # Yaw residual expressed as a difference of unit vectors.
        rtx = np.cos(rg) - np.cos(ra)
        rty = np.sin(rg) - np.sin(ra)
        return np.concatenate([xt, yt, wt, lt, rtx, rty], axis=-1)
    rt = rg - ra
    return np.concatenate([xt, yt, wt, lt, rt], axis=-1)
def bev_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """Bird's-eye-view box decode; inverse of ``bev_box_encode``.

    Args:
        box_encodings ([N, 5 or 6] array): regression targets.
        anchors ([N, 5] array): anchors: x, y, w, l, r
    Returns:
        [N, 5] array of decoded boxes.
    """
    xa, ya, wa, la, ra = np.split(anchors, 5, axis=-1)
    if encode_angle_to_vector:
        xt, yt, wt, lt, rtx, rty = np.split(box_encodings, 6, axis=-1)
    else:
        xt, yt, wt, lt, rt = np.split(box_encodings, 5, axis=-1)
    diag = np.sqrt(la ** 2 + wa ** 2)
    xg = xa + xt * diag
    yg = ya + yt * diag
    if smooth_dim:
        wg, lg = (wt + 1) * wa, (lt + 1) * la
    else:
        wg, lg = np.exp(wt) * wa, np.exp(lt) * la
    if encode_angle_to_vector:
        rg = np.arctan2(rty + np.sin(ra), rtx + np.cos(ra))
    else:
        rg = rt + ra
    return np.concatenate([xg, yg, wg, lg, rg], axis=-1)
class BoxCoder(metaclass=ABCMeta):
    """Abstract base class for box coders.

    Subclasses implement :meth:`_encode` / :meth:`_decode`; the public
    :meth:`encode` / :meth:`decode` simply delegate to them.

    Bug fix: the original used the Python-2 ``__metaclass__ = ABCMeta``
    attribute, which is silently ignored on Python 3, so the
    ``@abstractmethod`` decorators were never enforced and ``BoxCoder``
    itself could be instantiated. Declaring ``metaclass=ABCMeta``
    restores the intended enforcement.
    """
    def code_size(self):
        # Number of scalars per encoded box; overridden by subclasses.
        pass
    def encode(self, boxes, anchors):
        """Encode *boxes* relative to *anchors* via the subclass hook."""
        return self._encode(boxes, anchors)
    def decode(self, rel_codes, anchors):
        """Decode *rel_codes* relative to *anchors* via the subclass hook."""
        return self._decode(rel_codes, anchors)
    @abstractmethod
    def _encode(self, boxes, anchors):
        pass
    @abstractmethod
    def _decode(self, rel_codes, anchors):
        pass
class GroundBox3dCoder(BoxCoder):
    """Coder for 7-DoF ground boxes, delegating to ``second_box_*``.

    Args:
        linear_dim: use linear instead of log-ratio dimension targets.
        vec_encode: encode yaw as a (cos, sin) residual vector.
    """
    def __init__(self, linear_dim=False, vec_encode=False):
        super().__init__()
        self.vec_encode = vec_encode
        self.linear_dim = linear_dim
    @property
    def code_size(self):
        # One extra channel when the angle is encoded as a 2-vector.
        return 7 + (1 if self.vec_encode else 0)
    def _encode(self, boxes, anchors):
        return second_box_encode(boxes, anchors, self.vec_encode, self.linear_dim)
    def _decode(self, encodings, anchors):
        return second_box_decode(encodings, anchors, self.vec_encode, self.linear_dim)
class BevBoxCoder(BoxCoder):
    """WARNING: this coder will return encoding with size=5, but
    takes size=7 boxes, anchors.

    The z/height channels are dropped on encode and re-inserted on
    decode using the fixed ``z_fixed`` / ``h_fixed`` values.
    """
    def __init__(self, linear_dim=False, vec_encode=False, z_fixed=-1.0, h_fixed=2.0):
        super().__init__()
        self.vec_encode = vec_encode
        self.linear_dim = linear_dim
        self.z_fixed = z_fixed
        self.h_fixed = h_fixed
    @property
    def code_size(self):
        # One extra channel when the angle is encoded as a 2-vector.
        return 5 + (1 if self.vec_encode else 0)
    def _encode(self, boxes, anchors):
        # Keep only x, y, w, l, r from the full 7-DoF representation.
        bev_cols = [0, 1, 3, 4, 6]
        return bev_box_encode(boxes[..., bev_cols], anchors[..., bev_cols],
                              self.vec_encode, self.linear_dim)
    def _decode(self, encodings, anchors):
        bev = bev_box_decode(encodings, anchors[..., [0, 1, 3, 4, 6]],
                             self.vec_encode, self.linear_dim)
        z_col = np.full([*bev.shape[:-1], 1], self.z_fixed, dtype=bev.dtype)
        h_col = np.full([*bev.shape[:-1], 1], self.h_fixed, dtype=bev.dtype)
        # Re-insert the fixed z and h channels at their original slots.
        return np.concatenate([bev[..., :2], z_col, bev[..., 2:4], h_col, bev[..., 4:]],
                              axis=-1)
class BoxCornerCoder(BoxCoder):
def __init__(self, ):
super(BoxCornerCoder).__init__()
@property
def code_size(self):
return 24
def _encode(self, boxes, anchors):
assert len(boxes) == len(anchors)
N = len(boxes)
boxes = center_to_corner_box3d(boxes)
anchors = center_to_corner_box3d(anchors)
offset = boxes - anchors
return offset.reshape(N, -1)
def _decode(self, encodings, anchors):
NotImplementedError | [
"numpy.full",
"numpy.arctan2",
"numpy.log",
"torch.sqrt",
"torch.split",
"torch.atan2",
"torch.cat",
"numpy.split",
"torch.cos",
"torch.exp",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"disprcnn.modeling.sassd_module.core.bbox3d.geometry.center_to_corner_box3d",
"torch.sin",
"numpy.concate... | [((610, 639), 'numpy.split', 'np.split', (['anchors', '(7)'], {'axis': '(-1)'}), '(anchors, 7, axis=-1)\n', (618, 639), True, 'import numpy as np\n'), ((673, 700), 'numpy.split', 'np.split', (['boxes', '(7)'], {'axis': '(-1)'}), '(boxes, 7, axis=-1)\n', (681, 700), True, 'import numpy as np\n'), ((758, 784), 'numpy.sqrt', 'np.sqrt', (['(la ** 2 + wa ** 2)'], {}), '(la ** 2 + wa ** 2)\n', (765, 784), True, 'import numpy as np\n'), ((1718, 1749), 'torch.split', 'torch.split', (['anchors', '(1)'], {'dim': '(-1)'}), '(anchors, 1, dim=-1)\n', (1729, 1749), False, 'import torch\n'), ((2071, 2100), 'torch.sqrt', 'torch.sqrt', (['(la ** 2 + wa ** 2)'], {}), '(la ** 2 + wa ** 2)\n', (2081, 2100), False, 'import torch\n'), ((2615, 2662), 'torch.cat', 'torch.cat', (['[xg, yg, zg, wg, lg, hg, rg]'], {'dim': '(-1)'}), '([xg, yg, zg, wg, lg, hg, rg], dim=-1)\n', (2624, 2662), False, 'import torch\n'), ((3088, 3117), 'numpy.split', 'np.split', (['anchors', '(5)'], {'axis': '(-1)'}), '(anchors, 5, axis=-1)\n', (3096, 3117), True, 'import numpy as np\n'), ((3143, 3170), 'numpy.split', 'np.split', (['boxes', '(5)'], {'axis': '(-1)'}), '(boxes, 5, axis=-1)\n', (3151, 3170), True, 'import numpy as np\n'), ((3186, 3212), 'numpy.sqrt', 'np.sqrt', (['(la ** 2 + wa ** 2)'], {}), '(la ** 2 + wa ** 2)\n', (3193, 3212), True, 'import numpy as np\n'), ((4088, 4117), 'numpy.split', 'np.split', (['anchors', '(5)'], {'axis': '(-1)'}), '(anchors, 5, axis=-1)\n', (4096, 4117), True, 'import numpy as np\n'), ((4310, 4336), 'numpy.sqrt', 'np.sqrt', (['(la ** 2 + wa ** 2)'], {}), '(la ** 2 + wa ** 2)\n', (4317, 4336), True, 'import numpy as np\n'), ((4735, 4780), 'numpy.concatenate', 'np.concatenate', (['[xg, yg, wg, lg, rg]'], {'axis': '(-1)'}), '([xg, yg, wg, lg, rg], axis=-1)\n', (4749, 4780), True, 'import numpy as np\n'), ((997, 1012), 'numpy.log', 'np.log', (['(lg / la)'], {}), '(lg / la)\n', (1003, 1012), True, 'import numpy as np\n'), ((1026, 1041), 'numpy.log', 'np.log', 
(['(wg / wa)'], {}), '(wg / wa)\n', (1032, 1041), True, 'import numpy as np\n'), ((1055, 1070), 'numpy.log', 'np.log', (['(hg / ha)'], {}), '(hg / ha)\n', (1061, 1070), True, 'import numpy as np\n'), ((1116, 1126), 'numpy.cos', 'np.cos', (['rg'], {}), '(rg)\n', (1122, 1126), True, 'import numpy as np\n'), ((1141, 1151), 'numpy.sin', 'np.sin', (['rg'], {}), '(rg)\n', (1147, 1151), True, 'import numpy as np\n'), ((1166, 1176), 'numpy.cos', 'np.cos', (['ra'], {}), '(ra)\n', (1172, 1176), True, 'import numpy as np\n'), ((1191, 1201), 'numpy.sin', 'np.sin', (['ra'], {}), '(ra)\n', (1197, 1201), True, 'import numpy as np\n'), ((1265, 1324), 'numpy.concatenate', 'np.concatenate', (['[xt, yt, zt, wt, lt, ht, rtx, rty]'], {'axis': '(-1)'}), '([xt, yt, zt, wt, lt, ht, rtx, rty], axis=-1)\n', (1279, 1324), True, 'import numpy as np\n'), ((1371, 1424), 'numpy.concatenate', 'np.concatenate', (['[xt, yt, zt, wt, lt, ht, rt]'], {'axis': '(-1)'}), '([xt, yt, zt, wt, lt, ht, rt], axis=-1)\n', (1385, 1424), True, 'import numpy as np\n'), ((1824, 1861), 'torch.split', 'torch.split', (['box_encodings', '(1)'], {'dim': '(-1)'}), '(box_encodings, 1, dim=-1)\n', (1835, 1861), False, 'import torch\n'), ((1923, 1960), 'torch.split', 'torch.split', (['box_encodings', '(1)'], {'dim': '(-1)'}), '(box_encodings, 1, dim=-1)\n', (1934, 1960), False, 'import torch\n'), ((2427, 2440), 'torch.cos', 'torch.cos', (['ra'], {}), '(ra)\n', (2436, 2440), False, 'import torch\n'), ((2455, 2468), 'torch.sin', 'torch.sin', (['ra'], {}), '(ra)\n', (2464, 2468), False, 'import torch\n'), ((2530, 2551), 'torch.atan2', 'torch.atan2', (['rgy', 'rgx'], {}), '(rgy, rgx)\n', (2541, 2551), False, 'import torch\n'), ((3368, 3383), 'numpy.log', 'np.log', (['(lg / la)'], {}), '(lg / la)\n', (3374, 3383), True, 'import numpy as np\n'), ((3397, 3412), 'numpy.log', 'np.log', (['(wg / wa)'], {}), '(wg / wa)\n', (3403, 3412), True, 'import numpy as np\n'), ((3458, 3468), 'numpy.cos', 'np.cos', (['rg'], {}), '(rg)\n', (3464, 
3468), True, 'import numpy as np\n'), ((3483, 3493), 'numpy.sin', 'np.sin', (['rg'], {}), '(rg)\n', (3489, 3493), True, 'import numpy as np\n'), ((3508, 3518), 'numpy.cos', 'np.cos', (['ra'], {}), '(ra)\n', (3514, 3518), True, 'import numpy as np\n'), ((3533, 3543), 'numpy.sin', 'np.sin', (['ra'], {}), '(ra)\n', (3539, 3543), True, 'import numpy as np\n'), ((3607, 3658), 'numpy.concatenate', 'np.concatenate', (['[xt, yt, wt, lt, rtx, rty]'], {'axis': '(-1)'}), '([xt, yt, wt, lt, rtx, rty], axis=-1)\n', (3621, 3658), True, 'import numpy as np\n'), ((3705, 3750), 'numpy.concatenate', 'np.concatenate', (['[xt, yt, wt, lt, rt]'], {'axis': '(-1)'}), '([xt, yt, wt, lt, rt], axis=-1)\n', (3719, 3750), True, 'import numpy as np\n'), ((4184, 4219), 'numpy.split', 'np.split', (['box_encodings', '(6)'], {'axis': '(-1)'}), '(box_encodings, 6, axis=-1)\n', (4192, 4219), True, 'import numpy as np\n'), ((4259, 4294), 'numpy.split', 'np.split', (['box_encodings', '(5)'], {'axis': '(-1)'}), '(box_encodings, 5, axis=-1)\n', (4267, 4294), True, 'import numpy as np\n'), ((4575, 4585), 'numpy.cos', 'np.cos', (['ra'], {}), '(ra)\n', (4581, 4585), True, 'import numpy as np\n'), ((4600, 4610), 'numpy.sin', 'np.sin', (['ra'], {}), '(ra)\n', (4606, 4610), True, 'import numpy as np\n'), ((4672, 4692), 'numpy.arctan2', 'np.arctan2', (['rgy', 'rgx'], {}), '(rgy, rgx)\n', (4682, 4692), True, 'import numpy as np\n'), ((6651, 6711), 'numpy.full', 'np.full', (['[*ret.shape[:-1], 1]', 'self.z_fixed'], {'dtype': 'ret.dtype'}), '([*ret.shape[:-1], 1], self.z_fixed, dtype=ret.dtype)\n', (6658, 6711), True, 'import numpy as np\n'), ((6730, 6790), 'numpy.full', 'np.full', (['[*ret.shape[:-1], 1]', 'self.h_fixed'], {'dtype': 'ret.dtype'}), '([*ret.shape[:-1], 1], self.h_fixed, dtype=ret.dtype)\n', (6737, 6790), True, 'import numpy as np\n'), ((6806, 6897), 'numpy.concatenate', 'np.concatenate', (['[ret[..., :2], z_fixed, ret[..., 2:4], h_fixed, ret[..., 4:]]'], {'axis': '(-1)'}), '([ret[..., :2], 
z_fixed, ret[..., 2:4], h_fixed, ret[..., 4:]\n ], axis=-1)\n', (6820, 6897), True, 'import numpy as np\n'), ((7173, 7202), 'disprcnn.modeling.sassd_module.core.bbox3d.geometry.center_to_corner_box3d', 'center_to_corner_box3d', (['boxes'], {}), '(boxes)\n', (7195, 7202), False, 'from disprcnn.modeling.sassd_module.core.bbox3d.geometry import center_to_corner_box3d\n'), ((7221, 7252), 'disprcnn.modeling.sassd_module.core.bbox3d.geometry.center_to_corner_box3d', 'center_to_corner_box3d', (['anchors'], {}), '(anchors)\n', (7243, 7252), False, 'from disprcnn.modeling.sassd_module.core.bbox3d.geometry import center_to_corner_box3d\n'), ((2299, 2312), 'torch.exp', 'torch.exp', (['lt'], {}), '(lt)\n', (2308, 2312), False, 'import torch\n'), ((2331, 2344), 'torch.exp', 'torch.exp', (['wt'], {}), '(wt)\n', (2340, 2344), False, 'import torch\n'), ((2363, 2376), 'torch.exp', 'torch.exp', (['ht'], {}), '(ht)\n', (2372, 2376), False, 'import torch\n'), ((4485, 4495), 'numpy.exp', 'np.exp', (['lt'], {}), '(lt)\n', (4491, 4495), True, 'import numpy as np\n'), ((4514, 4524), 'numpy.exp', 'np.exp', (['wt'], {}), '(wt)\n', (4520, 4524), True, 'import numpy as np\n')] |
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import spsolve
from Test2 import localmatrix2
import StiffnessNewCyth
def DisplacementCy2(NM, Modules, TransT, Transmatrix, NDOF, MemberCOORDNum, L, Pf,
                    MemberProp, Local_Matrix, COORDNum, x, AgrD, DNumber, Agr1, Agr2):
    """Assemble the global stiffness system and solve for joint displacements.

    Builds per-member local stiffness matrices (via the Cython helper
    ``localmatrix2``), rotates them into global coordinates with the
    transformation matrices, scatters them into a sparse system
    (``StiffnessNewCyth.NewStiff``) and solves ``ST @ d = Pf``.

    Side effects: ``Agr1[0]``, ``Agr2[0]`` and ``AgrD[x]`` are written
    in place with displacements at the two monitored DOFs.

    Returns:
        (displacement, Agr1, Agr2) — the full displacement vector and the
        two updated monitor arrays.

    NOTE(review): argument shapes/semantics (e.g. MemberProp columns,
    COORDNum indexing) are defined by the Cython helpers — confirm
    against their sources before changing anything here.
    """
    # Local Stiffness Matrix
    Local_Matrix = localmatrix2(Local_Matrix, MemberProp[:, 0], MemberProp[:, 1], MemberProp[:, 2],
                                MemberProp[:, 3], Modules[:, 0], Modules[:, 1], L)
    # Global Stiffness Matrix: K_global = T^T @ K_local @ T per member.
    Global_Matrix = np.einsum('ijk,ikl ->ijl', TransT, Local_Matrix)
    Global_Matrix = np.einsum('ijk,ikl ->ijl', Global_Matrix, Transmatrix)
    # Stiffness Matrix — COO triplets, one 12x12 block per member.
    Number = NM * 12 * 12
    row = np.zeros(Number, dtype=np.intc)
    col = np.zeros(Number, dtype=np.intc)
    data = np.zeros(Number)
    StiffnessNewCyth.NewStiff(NDOF - 1, Global_Matrix, NM, MemberCOORDNum, row, col, data)
    # Solve for joint Displacements
    ST = csr_matrix((data, (row, col)), shape=(NDOF, NDOF))
    displacement = spsolve(ST, Pf)
    displacement = spsolve(ST, Pf)
    Agr1[0] = displacement[COORDNum[DNumber[0], 1]]
    Agr2[0] = displacement[COORDNum[DNumber[1], 1]]
    AgrD[x] = displacement[COORDNum[DNumber[0], 1]] + displacement[COORDNum[DNumber[1], 1]]
    return displacement, Agr1, Agr2
| [
"Test2.localmatrix2",
"numpy.zeros",
"numpy.einsum",
"StiffnessNewCyth.NewStiff",
"scipy.sparse.csr_matrix",
"scipy.sparse.linalg.spsolve"
] | [((384, 519), 'Test2.localmatrix2', 'localmatrix2', (['Local_Matrix', 'MemberProp[:, 0]', 'MemberProp[:, 1]', 'MemberProp[:, 2]', 'MemberProp[:, 3]', 'Modules[:, 0]', 'Modules[:, 1]', 'L'], {}), '(Local_Matrix, MemberProp[:, 0], MemberProp[:, 1], MemberProp[:,\n 2], MemberProp[:, 3], Modules[:, 0], Modules[:, 1], L)\n', (396, 519), False, 'from Test2 import localmatrix2\n'), ((603, 651), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl ->ijl"""', 'TransT', 'Local_Matrix'], {}), "('ijk,ikl ->ijl', TransT, Local_Matrix)\n", (612, 651), True, 'import numpy as np\n'), ((673, 727), 'numpy.einsum', 'np.einsum', (['"""ijk,ikl ->ijl"""', 'Global_Matrix', 'Transmatrix'], {}), "('ijk,ikl ->ijl', Global_Matrix, Transmatrix)\n", (682, 727), True, 'import numpy as np\n'), ((792, 823), 'numpy.zeros', 'np.zeros', (['Number'], {'dtype': 'np.intc'}), '(Number, dtype=np.intc)\n', (800, 823), True, 'import numpy as np\n'), ((835, 866), 'numpy.zeros', 'np.zeros', (['Number'], {'dtype': 'np.intc'}), '(Number, dtype=np.intc)\n', (843, 866), True, 'import numpy as np\n'), ((879, 895), 'numpy.zeros', 'np.zeros', (['Number'], {}), '(Number)\n', (887, 895), True, 'import numpy as np\n'), ((901, 991), 'StiffnessNewCyth.NewStiff', 'StiffnessNewCyth.NewStiff', (['(NDOF - 1)', 'Global_Matrix', 'NM', 'MemberCOORDNum', 'row', 'col', 'data'], {}), '(NDOF - 1, Global_Matrix, NM, MemberCOORDNum, row,\n col, data)\n', (926, 991), False, 'import StiffnessNewCyth\n'), ((1037, 1087), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(data, (row, col))'], {'shape': '(NDOF, NDOF)'}), '((data, (row, col)), shape=(NDOF, NDOF))\n', (1047, 1087), False, 'from scipy.sparse import csr_matrix\n'), ((1108, 1123), 'scipy.sparse.linalg.spsolve', 'spsolve', (['ST', 'Pf'], {}), '(ST, Pf)\n', (1115, 1123), False, 'from scipy.sparse.linalg import spsolve\n')] |
# Author: <NAME>
# https://sites.google.com/site/professorlucianodaniel
from scipy.io import savemat
from numpy import random
from numpy import linalg
import time
def pause():
    """Block until the user presses the ENTER key."""
    prompt = "Press the <ENTER> key to continue..."
    input(prompt)
# Read the matrix size from the user and build a uniform-random square matrix.
dim = int(input('Dimension of the square random matrix:'))
A = random.rand(dim, dim)
print(A, '\n')
print()
# Save the input matrix for cross-checking in MATLAB/Octave.
savemat('calc_autovalores_01.mat', {'A': A})
# Time the eigenvalue computation.
t = time.time()
w = linalg.eigvals(A)
elapsed = time.time() - t
print(w, '\n')
print('EIG elapsed time in PYTHON (executable) is:', elapsed, 'seconds', '\n')
# Save the computed eigenvalues alongside the input matrix.
savemat('calc_autovalores_02.mat', {'w': w})
print('"I have a paper afloat which I hold to be great guns" (Maxwell, J.C.)', '\n')
pause()
| [
"numpy.random.rand",
"numpy.linalg.eigvals",
"scipy.io.savemat",
"time.time"
] | [((308, 329), 'numpy.random.rand', 'random.rand', (['dim', 'dim'], {}), '(dim, dim)\n', (319, 329), False, 'from numpy import random\n'), ((356, 400), 'scipy.io.savemat', 'savemat', (['"""calc_autovalores_01.mat"""', "{'A': A}"], {}), "('calc_autovalores_01.mat', {'A': A})\n", (363, 400), False, 'from scipy.io import savemat\n'), ((406, 417), 'time.time', 'time.time', ([], {}), '()\n', (415, 417), False, 'import time\n'), ((423, 440), 'numpy.linalg.eigvals', 'linalg.eigvals', (['A'], {}), '(A)\n', (437, 440), False, 'from numpy import linalg\n'), ((565, 609), 'scipy.io.savemat', 'savemat', (['"""calc_autovalores_02.mat"""', "{'w': w}"], {}), "('calc_autovalores_02.mat', {'w': w})\n", (572, 609), False, 'from scipy.io import savemat\n'), ((452, 463), 'time.time', 'time.time', ([], {}), '()\n', (461, 463), False, 'import time\n')] |
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.docstring as docstring
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
from matplotlib.axes._base import _AxesBase
def _make_secondary_locator(rect, parent):
    """
    Helper function to locate the secondary axes.

    Returns a locator callable suitable for ``Axes.set_axes_locator``:
    given an axes and a renderer it yields the bounding box, in figure
    coordinates, of the rectangle *rect* (``[l, b, w, h]``) expressed in
    the axes-relative coordinates of *parent*.  The parent transform is
    evaluated lazily at draw time, so resizing the window keeps the
    secondary axes correctly placed.
    """
    bounds = mtransforms.Bbox.from_bounds(*rect)

    def secondary_locator(ax, renderer):
        # Evaluate the parent transform now, not at creation time,
        # because it may have changed (e.g. on a window resize).
        in_parent = mtransforms.TransformedBbox(bounds, parent.transAxes)
        to_figure = parent.figure.transFigure.inverted()
        return mtransforms.TransformedBbox(in_parent, to_figure)

    return secondary_locator
class SecondaryAxis(_AxesBase):
    """
    General class to hold a Secondary_X/Yaxis.
    """
    def __init__(self, parent, orientation,
                 location, functions, **kwargs):
        """
        See `.secondary_xaxis` and `.secondary_yaxis` for the doc string.
        While there is no need for this to be private, it should really be
        called by those higher level functions.
        """
        self._functions = functions
        self._parent = parent
        self._orientation = orientation
        self._ticks_set = False
        if self._orientation == 'x':
            # Near-zero height; the real position comes from the axes
            # locator installed in set_location().
            super().__init__(self._parent.figure, [0, 1., 1, 0.0001], **kwargs)
            self._axis = self.xaxis
            self._locstrings = ['top', 'bottom']
            self._otherstrings = ['left', 'right']
        elif self._orientation == 'y':
            super().__init__(self._parent.figure, [0, 1., 0.0001, 1], **kwargs)
            self._axis = self.yaxis
            self._locstrings = ['right', 'left']
            self._otherstrings = ['top', 'bottom']
        self._parentscale = self._axis.get_scale()
        # this gets positioned w/o constrained_layout so exclude:
        self._layoutbox = None
        self._poslayoutbox = None
        self.set_location(location)
        self.set_functions(functions)
        # styling:
        if self._orientation == 'x':
            otheraxis = self.yaxis
        else:
            otheraxis = self.xaxis
        otheraxis.set_major_locator(mticker.NullLocator())
        otheraxis.set_ticks_position('none')
        for st in self._otherstrings:
            self.spines[st].set_visible(False)
        for st in self._locstrings:
            self.spines[st].set_visible(True)
        if self._pos < 0.5:
            # flip the location strings...
            self._locstrings = self._locstrings[::-1]
        self.set_alignment(self._locstrings[0])
    def set_alignment(self, align):
        """
        Set if axes spine and labels are drawn at top or bottom (or left/right)
        of the axes.
        Parameters
        ----------
        align : str
            either 'top' or 'bottom' for orientation='x' or
            'left' or 'right' for orientation='y' axis.
        """
        if align in self._locstrings:
            if align == self._locstrings[1]:
                # need to change the orientation.
                self._locstrings = self._locstrings[::-1]
        elif align != self._locstrings[0]:
            raise ValueError('"{}" is not a valid axis orientation, '
                             'not changing the orientation;'
                             'choose "{}" or "{}""'.format(align,
                             self._locstrings[0], self._locstrings[1]))
        # _locstrings[0] is now the active side, [1] the hidden one.
        self.spines[self._locstrings[0]].set_visible(True)
        self.spines[self._locstrings[1]].set_visible(False)
        self._axis.set_ticks_position(align)
        self._axis.set_label_position(align)
    def set_location(self, location):
        """
        Set the vertical or horizontal location of the axes in
        parent-normalized co-ordinates.
        Parameters
        ----------
        location : {'top', 'bottom', 'left', 'right'} or float
            The position to put the secondary axis. Strings can be 'top' or
            'bottom' for orientation='x' and 'right' or 'left' for
            orientation='y'. A float indicates the relative position on the
            parent axes to put the new axes, 0.0 being the bottom (or left)
            and 1.0 being the top (or right).
        """
        # This puts the rectangle into figure-relative coordinates.
        if isinstance(location, str):
            if location in ['top', 'right']:
                self._pos = 1.
            elif location in ['bottom', 'left']:
                self._pos = 0.
            else:
                raise ValueError("location must be '{}', '{}', or a "
                                 "float, not '{}'".format(location,
                                 self._locstrings[0], self._locstrings[1]))
        else:
            self._pos = location
        self._loc = location
        if self._orientation == 'x':
            # An x-secondary axis is a thin horizontal strip at self._pos.
            bounds = [0, self._pos, 1., 1e-10]
        else:
            bounds = [self._pos, 0, 1e-10, 1]
        secondary_locator = _make_secondary_locator(bounds, self._parent)
        # this locator lets the axes move in the parent axes coordinates.
        # so it never needs to know where the parent is explicitly in
        # figure co-ordinates.
        # it gets called in `ax.apply_aspect() (of all places)
        self.set_axes_locator(secondary_locator)
    def apply_aspect(self, position=None):
        # docstring inherited.
        self._set_lims()
        super().apply_aspect(position)
    @cbook._make_keyword_only("3.2", "minor")
    def set_ticks(self, ticks, minor=False):
        """
        Set the x ticks with list of *ticks*
        Parameters
        ----------
        ticks : list
            List of x-axis tick locations.
        minor : bool, optional
            If ``False`` sets major ticks, if ``True`` sets minor ticks.
            Default is ``False``.
        """
        ret = self._axis.set_ticks(ticks, minor=minor)
        self.stale = True
        # Remember that the user pinned the ticks so _set_scale() can
        # restore them after changing the scale.
        self._ticks_set = True
        return ret
    def set_functions(self, functions):
        """
        Set how the secondary axis converts limits from the parent axes.
        Parameters
        ----------
        functions : 2-tuple of func, or `Transform` with an inverse.
            Transform between the parent axis values and the secondary axis
            values.
            If supplied as a 2-tuple of functions, the first function is
            the forward transform function and the second is the inverse
            transform.
            If a transform is supplied, then the transform must have an
            inverse.
        """
        if self._orientation == 'x':
            set_scale = self.set_xscale
            parent_scale = self._parent.get_xscale()
        else:
            set_scale = self.set_yscale
            parent_scale = self._parent.get_yscale()
        # we need to use a modified scale so the scale can receive the
        # transform.  Only types supported are linear and log10 for now.
        # Probably possible to add other transforms as a todo...
        if parent_scale == 'log':
            defscale = 'functionlog'
        else:
            defscale = 'function'
        if (isinstance(functions, tuple) and len(functions) == 2 and
                callable(functions[0]) and callable(functions[1])):
            # make an arbitrary convert from a two-tuple of functions
            # forward and inverse.
            self._functions = functions
        elif functions is None:
            # Identity transform when no converter is supplied.
            self._functions = (lambda x: x, lambda x: x)
        else:
            raise ValueError('functions argument of secondary axes '
                             'must be a two-tuple of callable functions '
                             'with the first function being the transform '
                             'and the second being the inverse')
        # need to invert the roles here for the ticks to line up.
        set_scale(defscale, functions=self._functions[::-1])
    def draw(self, renderer=None, inframe=False):
        """
        Draw the secondary axes.
        Consults the parent axes for its limits and converts them
        using the converter specified by
        `~.axes._secondary_axes.set_functions` (or *functions*
        parameter when axes initialized.)
        """
        self._set_lims()
        # this sets the scale in case the parent has set its scale.
        self._set_scale()
        super().draw(renderer=renderer, inframe=inframe)
    def _set_scale(self):
        """
        Check if parent has set its scale
        """
        if self._orientation == 'x':
            pscale = self._parent.xaxis.get_scale()
            set_scale = self.set_xscale
        if self._orientation == 'y':
            pscale = self._parent.yaxis.get_scale()
            set_scale = self.set_yscale
        # Nothing to do if the parent scale is unchanged since last draw.
        if pscale == self._parentscale:
            return
        else:
            self._parentscale = pscale
        if pscale == 'log':
            defscale = 'functionlog'
        else:
            defscale = 'function'
        if self._ticks_set:
            ticks = self._axis.get_ticklocs()
        # need to invert the roles here for the ticks to line up.
        set_scale(defscale, functions=self._functions[::-1])
        # OK, set_scale sets the locators, but if we've called
        # axsecond.set_ticks, we want to keep those.
        if self._ticks_set:
            self._axis.set_major_locator(mticker.FixedLocator(ticks))
    def _set_lims(self):
        """
        Set the limits based on parent limits and the convert method
        between the parent and this secondary axes.
        """
        if self._orientation == 'x':
            lims = self._parent.get_xlim()
            set_lim = self.set_xlim
        if self._orientation == 'y':
            lims = self._parent.get_ylim()
            set_lim = self.set_ylim
        order = lims[0] < lims[1]
        lims = self._functions[0](np.array(lims))
        neworder = lims[0] < lims[1]
        if neworder != order:
            # Flip because the transform will take care of the flipping.
            lims = lims[::-1]
        set_lim(lims)
    def set_aspect(self, *args, **kwargs):
        """
        Secondary axes cannot set the aspect ratio, so calling this just
        sets a warning.
        """
        cbook._warn_external("Secondary axes can't set the aspect ratio")
    def set_xlabel(self, xlabel, fontdict=None, labelpad=None, **kwargs):
        """
        Set the label for the x-axis.
        Parameters
        ----------
        xlabel : str
            The label text.
        labelpad : scalar, optional, default: None
            Spacing in points between the label and the x-axis.
        Other Parameters
        ----------------
        **kwargs : `.Text` properties
            `.Text` properties control the appearance of the label.
        See also
        --------
        text : for information on how override and the optional args work
        """
        if labelpad is not None:
            self.xaxis.labelpad = labelpad
        return self.xaxis.set_label_text(xlabel, fontdict, **kwargs)
    def set_ylabel(self, ylabel, fontdict=None, labelpad=None, **kwargs):
        """
        Set the label for the y-axis.
        Parameters
        ----------
        ylabel : str
            The label text.
        labelpad : scalar, optional, default: None
            Spacing in points between the label and the y-axis.
        Other Parameters
        ----------------
        **kwargs : `.Text` properties
            `.Text` properties control the appearance of the label.
        See also
        --------
        text : for information on how override and the optional args work
        """
        if labelpad is not None:
            self.yaxis.labelpad = labelpad
        return self.yaxis.set_label_text(ylabel, fontdict, **kwargs)
    def set_color(self, color):
        """
        Change the color of the secondary axes and all decorators.
        Parameters
        ----------
        color : Matplotlib color
        """
        if self._orientation == 'x':
            self.tick_params(axis='x', colors=color)
            self.spines['bottom'].set_color(color)
            self.spines['top'].set_color(color)
            self.xaxis.label.set_color(color)
        else:
            self.tick_params(axis='y', colors=color)
            self.spines['left'].set_color(color)
            self.spines['right'].set_color(color)
            self.yaxis.label.set_color(color)
_secax_docstring = '''
Warnings
--------
This method is experimental as of 3.1, and the API may change.
Parameters
----------
location : {'top', 'bottom', 'left', 'right'} or float
The position to put the secondary axis. Strings can be 'top' or
'bottom' for orientation='x' and 'right' or 'left' for
orientation='y'. A float indicates the relative position on the
parent axes to put the new axes, 0.0 being the bottom (or left)
and 1.0 being the top (or right).
functions : 2-tuple of func, or Transform with an inverse
If a 2-tuple of functions, the user specifies the transform
function and its inverse. i.e.
`functions=(lambda x: 2 / x, lambda x: 2 / x)` would be an
reciprocal transform with a factor of 2.
The user can also directly supply a subclass of
`.transforms.Transform` so long as it has an inverse.
See :doc:`/gallery/subplots_axes_and_figures/secondary_axis`
for examples of making these conversions.
Other Parameters
----------------
**kwargs : `~matplotlib.axes.Axes` properties.
Other miscellaneous axes parameters.
Returns
-------
ax : axes._secondary_axes.SecondaryAxis
'''
docstring.interpd.update(_secax_docstring=_secax_docstring)
| [
"matplotlib.cbook._make_keyword_only",
"matplotlib.transforms.TransformedBbox",
"matplotlib.transforms.Bbox.from_bounds",
"matplotlib.ticker.FixedLocator",
"matplotlib.cbook._warn_external",
"numpy.array",
"matplotlib.docstring.interpd.update",
"matplotlib.ticker.NullLocator"
] | [((14292, 14351), 'matplotlib.docstring.interpd.update', 'docstring.interpd.update', ([], {'_secax_docstring': '_secax_docstring'}), '(_secax_docstring=_secax_docstring)\n', (14316, 14351), True, 'import matplotlib.docstring as docstring\n'), ((830, 865), 'matplotlib.transforms.Bbox.from_bounds', 'mtransforms.Bbox.from_bounds', (['*rect'], {}), '(*rect)\n', (858, 865), True, 'import matplotlib.transforms as mtransforms\n'), ((6080, 6120), 'matplotlib.cbook._make_keyword_only', 'cbook._make_keyword_only', (['"""3.2"""', '"""minor"""'], {}), "('3.2', 'minor')\n", (6104, 6120), True, 'import matplotlib.cbook as cbook\n'), ((1055, 1107), 'matplotlib.transforms.TransformedBbox', 'mtransforms.TransformedBbox', (['_rect', 'parent.transAxes'], {}), '(_rect, parent.transAxes)\n', (1082, 1107), True, 'import matplotlib.transforms as mtransforms\n'), ((1171, 1206), 'matplotlib.transforms.TransformedBbox', 'mtransforms.TransformedBbox', (['bb', 'tr'], {}), '(bb, tr)\n', (1198, 1206), True, 'import matplotlib.transforms as mtransforms\n'), ((10917, 10982), 'matplotlib.cbook._warn_external', 'cbook._warn_external', (['"""Secondary axes can\'t set the aspect ratio"""'], {}), '("Secondary axes can\'t set the aspect ratio")\n', (10937, 10982), True, 'import matplotlib.cbook as cbook\n'), ((2751, 2772), 'matplotlib.ticker.NullLocator', 'mticker.NullLocator', ([], {}), '()\n', (2770, 2772), True, 'import matplotlib.ticker as mticker\n'), ((10536, 10550), 'numpy.array', 'np.array', (['lims'], {}), '(lims)\n', (10544, 10550), True, 'import numpy as np\n'), ((10036, 10063), 'matplotlib.ticker.FixedLocator', 'mticker.FixedLocator', (['ticks'], {}), '(ticks)\n', (10056, 10063), True, 'import matplotlib.ticker as mticker\n')] |
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
def _split_cols(mat, lengths):
"""Split a 2D matrix to variable length columns."""
assert mat.size()[1] == sum(lengths), "Lengths must be summed to num columns"
l = np.cumsum([0] + lengths)
results = []
for s, e in zip(l[:-1], l[1:]):
results += [mat[:, s:e]]
return results
class NTM_Head(nn.Module):
    """Base class shared by NTM read and write heads.

    Stores the memory geometry (N addresses of dimension M) and the
    size of the controller output the head consumes. Subclasses must
    implement the three abstract-style methods below.
    """
    def __init__(self, address_count, address_dimension,
                 controller_output_size):
        super(NTM_Head, self).__init__()
        self.N = address_count              # number of memory addresses
        self.M = address_dimension          # width of each address
        self.controller_output_size = controller_output_size
    def is_read_head(self):
        """Return True for read heads, False for write heads."""
        raise NotImplementedError
    def reset_parameters(self):
        """(Re-)initialize the head's learnable parameters."""
        raise NotImplementedError
    def initialize_state(self):
        """Reset the head's per-sequence state."""
        raise NotImplementedError
class NTM_Read_Head(NTM_Head):
    """NTM read head: addresses memory and returns the read vector.

    Emits ``M + 6`` addressing parameters from the controller output:
    key (M), key strength beta (1), interpolation gate g (1), shift
    weighting s (3), sharpening gamma (1).

    Fix: replaces deprecated torch APIs (``nn.init.xavier_uniform``,
    ``nn.init.normal``, ``F.sigmoid``) with their behavior-identical
    modern equivalents, silencing deprecation warnings.
    """
    def __init__(self, address_count, address_dimension, controller_output_size, batch_size):
        super(NTM_Read_Head, self).__init__(address_count, address_dimension, controller_output_size)
        # key (M), beta (1), g (1), shift s (3), gamma (1)
        self.read_parameters_lengths = [self.M, 1, 1, 3, 1]
        self.fc_read_parameters = nn.Linear(controller_output_size, sum(self.read_parameters_lengths))
        self.batch_size = batch_size
        self.reset_parameters()
        self.initialize_state()
    def reset_parameters(self):
        # In-place underscore initializers are the supported spellings of
        # the deprecated nn.init.xavier_uniform / nn.init.normal aliases.
        nn.init.xavier_uniform_(self.fc_read_parameters.weight, gain=1.4)
        nn.init.normal_(self.fc_read_parameters.bias, std=0.01)
        self.initial_address_vec = nn.Parameter(torch.zeros(self.N))
        self.initial_read = nn.Parameter(torch.randn(1, self.M) * 0.01)
    def initialize_state(self):
        # Per-sequence state starts from the learned initial values.
        self.prev_address_vec = self.initial_address_vec.clone()
        self.prev_read = self.initial_read.repeat(self.batch_size, 1)
    def is_read_head(self):
        return True
    def forward(self, x, memory):
        """Address `memory` from controller output `x`; return the read vector."""
        read_parameters = self.fc_read_parameters(x)
        key_vec, β, g, s, γ = _split_cols(read_parameters, self.read_parameters_lengths)
        β = F.softplus(β)                # key strength >= 0
        g = torch.sigmoid(g)             # interpolation gate in (0, 1)
        s = F.softmax(s, dim=1)          # shift weights sum to 1
        γ = 1 + F.softplus(γ)            # sharpening >= 1
        self.prev_address_vec = memory.address_memory(key_vec, self.prev_address_vec, β, g, s, γ)
        new_read = memory.read_memory(self.prev_address_vec)
        self.prev_read = new_read
        return new_read
class NTM_Write_Head(NTM_Head):
    """NTM write head: addresses memory and applies an erase/add update.

    The controller output is mapped by one linear layer to the addressing
    parameters (key, β, g, s, γ) plus the erase and add vectors.
    """
    def __init__(self, address_count, address_dimension, controller_output_size):
        super(NTM_Write_Head, self).__init__(address_count, address_dimension, controller_output_size)
        # Column widths: key (M), β, g, shift s (3), γ, erase (M), add (M)
        self.write_parameters_lengths = [self.M, 1, 1, 3, 1, self.M, self.M]
        self.fc_write_parameters = nn.Linear(controller_output_size, sum(self.write_parameters_lengths))
        self.reset_parameters()
        self.initialize_state()
    def reset_parameters(self):
        # Use the in-place initializers (xavier_uniform_/normal_): the
        # un-suffixed variants are deprecated aliases in modern PyTorch.
        nn.init.xavier_uniform_(self.fc_write_parameters.weight, gain=1.4)
        nn.init.normal_(self.fc_write_parameters.bias, std=0.01)
        self.initial_address_vec = nn.Parameter(torch.zeros(self.N))
    def initialize_state(self):
        # Reset the recurrent addressing state to the (learned) initial value
        self.prev_address_vec = self.initial_address_vec.clone()
    def is_read_head(self):
        return False
    def forward(self, x, memory):
        """Address memory from controller output x and write (erase then add)."""
        write_parameters = self.fc_write_parameters(x)
        key_vec, β, g, s, γ, erase_vec, add_vec = _split_cols(write_parameters, self.write_parameters_lengths)
        β = F.softplus(β)        # key strength >= 0
        g = torch.sigmoid(g)     # interpolation gate in (0, 1); F.sigmoid is deprecated
        s = F.softmax(s, dim=1)  # normalized shift weighting
        γ = 1 + F.softplus(γ)    # sharpening exponent >= 1
        erase_vec = torch.sigmoid(erase_vec)  # erase strengths in (0, 1)
        self.prev_address_vec = memory.address_memory(key_vec, self.prev_address_vec, β, g, s, γ)
        memory.update_memory(self.prev_address_vec, erase_vec, add_vec)
"torch.randn",
"torch.nn.functional.softmax",
"torch.nn.init.xavier_uniform",
"numpy.cumsum",
"torch.nn.init.normal",
"torch.nn.functional.sigmoid",
"torch.zeros",
"torch.nn.functional.softplus"
] | [((265, 289), 'numpy.cumsum', 'np.cumsum', (['([0] + lengths)'], {}), '([0] + lengths)\n', (274, 289), True, 'import numpy as np\n'), ((1426, 1490), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['self.fc_read_parameters.weight'], {'gain': '(1.4)'}), '(self.fc_read_parameters.weight, gain=1.4)\n', (1448, 1490), True, 'import torch.nn as nn\n'), ((1499, 1553), 'torch.nn.init.normal', 'nn.init.normal', (['self.fc_read_parameters.bias'], {'std': '(0.01)'}), '(self.fc_read_parameters.bias, std=0.01)\n', (1513, 1553), True, 'import torch.nn as nn\n'), ((2104, 2117), 'torch.nn.functional.softplus', 'F.softplus', (['β'], {}), '(β)\n', (2114, 2117), True, 'import torch.nn.functional as F\n'), ((2129, 2141), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['g'], {}), '(g)\n', (2138, 2141), True, 'import torch.nn.functional as F\n'), ((2154, 2173), 'torch.nn.functional.softmax', 'F.softmax', (['s'], {'dim': '(1)'}), '(s, dim=1)\n', (2163, 2173), True, 'import torch.nn.functional as F\n'), ((2930, 2995), 'torch.nn.init.xavier_uniform', 'nn.init.xavier_uniform', (['self.fc_write_parameters.weight'], {'gain': '(1.4)'}), '(self.fc_write_parameters.weight, gain=1.4)\n', (2952, 2995), True, 'import torch.nn as nn\n'), ((3004, 3059), 'torch.nn.init.normal', 'nn.init.normal', (['self.fc_write_parameters.bias'], {'std': '(0.01)'}), '(self.fc_write_parameters.bias, std=0.01)\n', (3018, 3059), True, 'import torch.nn as nn\n'), ((3493, 3506), 'torch.nn.functional.softplus', 'F.softplus', (['β'], {}), '(β)\n', (3503, 3506), True, 'import torch.nn.functional as F\n'), ((3518, 3530), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['g'], {}), '(g)\n', (3527, 3530), True, 'import torch.nn.functional as F\n'), ((3543, 3562), 'torch.nn.functional.softmax', 'F.softmax', (['s'], {'dim': '(1)'}), '(s, dim=1)\n', (3552, 3562), True, 'import torch.nn.functional as F\n'), ((3613, 3633), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['erase_vec'], {}), '(erase_vec)\n', (3622, 3633), True, 
'import torch.nn.functional as F\n'), ((1603, 1622), 'torch.zeros', 'torch.zeros', (['self.N'], {}), '(self.N)\n', (1614, 1622), False, 'import torch\n'), ((2191, 2204), 'torch.nn.functional.softplus', 'F.softplus', (['γ'], {}), '(γ)\n', (2201, 2204), True, 'import torch.nn.functional as F\n'), ((3109, 3128), 'torch.zeros', 'torch.zeros', (['self.N'], {}), '(self.N)\n', (3120, 3128), False, 'import torch\n'), ((3580, 3593), 'torch.nn.functional.softplus', 'F.softplus', (['γ'], {}), '(γ)\n', (3590, 3593), True, 'import torch.nn.functional as F\n'), ((1665, 1687), 'torch.randn', 'torch.randn', (['(1)', 'self.M'], {}), '(1, self.M)\n', (1676, 1687), False, 'import torch\n')] |
##################################################################################
#
# apogee.tools.read: read various APOGEE data files
#
# contains:
#
# - allStar: read the allStar.fits file
# - apogeeDesign: read the apogeeDesign file
# - apogeeField: read the apogeeField file
# - apogeeObject: read an apogeeObject file
# - apogeePlate: read the apogeePlate file
# - apokasc: read the APOKASC catalog
# - mainIndx: return the index of main targets in a data set
# - obslog: read the observation log
# - rcsample: read the red clump sample
#
##################################################################################
from functools import wraps
import os
import sys
import copy
import warnings
from operator import itemgetter
import numpy
import numpy.lib.recfunctions
from . import _apStarPixelLimits,_aspcapPixelLimits, elemIndx
try:
import esutil
_ESUTIL_LOADED= True
_ESUTIL_VERSION= [int(v.split('rc')[0])
for v in esutil.__version__.split('.')]
except ImportError:
_ESUTIL_LOADED= False
try:
import fitsio
fitsread= fitsio.read
fitswrite=fitsio.write
headerread=fitsio.read_header
_FITSIO_LOADED = True
except ImportError:
import astropy.io.fits as pyfits
fitsread= pyfits.getdata
fitswrite=pyfits.writeto
headerread=pyfits.getheader
_FITSIO_LOADED = False
import tqdm
from apogee.tools import path, paramIndx, download
from apogee.tools.path import change_dr # make this available here
_ERASESTR= " "
def modelspecOnApStarWavegrid(func):
    """Decorator to put a model spectrum onto the apStar wavelength grid"""
    @wraps(func)
    def output_wrapper(*args,**kwargs):
        spec= func(*args,**kwargs)
        # Only pad onto the full apStar grid when requested (ext=234 also
        # only pads when apStarWavegrid is requested)
        wantApStar= kwargs.get('apStarWavegrid',True) \
            or (kwargs.get('ext',-1) == 234
                and kwargs.get('apStarWavegrid',True))
        if not wantApStar:
            return spec
        twoD= len(spec.shape) == 2
        if twoD:
            padded= numpy.zeros((8575,spec.shape[0]),dtype=spec.dtype)+numpy.nan
            spec= spec.T
        else:
            padded= numpy.zeros(8575,dtype=spec.dtype)+numpy.nan
        apStarBlu_lo,apStarBlu_hi,apStarGre_lo,apStarGre_hi,apStarRed_lo,apStarRed_hi = _apStarPixelLimits(dr=None)
        aspcapBlu_start,aspcapGre_start,aspcapRed_start,aspcapTotal = _aspcapPixelLimits(dr=None)
        # Copy the three detector chunks into their apStar pixel ranges;
        # everything outside those ranges stays NaN
        padded[apStarBlu_lo:apStarBlu_hi]= spec[:aspcapGre_start]
        padded[apStarGre_lo:apStarGre_hi]= spec[aspcapGre_start:aspcapRed_start]
        padded[apStarRed_lo:apStarRed_hi]= spec[aspcapRed_start:]
        return padded.T if twoD else padded
    return output_wrapper
def specOnAspcapWavegrid(func):
    """Decorator to put an APOGEE spectrum onto the ASPCAP wavelength grid"""
    @wraps(func)
    def output_wrapper(*args,**kwargs):
        out= func(*args,**kwargs)
        if kwargs.get('header',True):
            out, hdr= out
        if kwargs.get('aspcapWavegrid',False):
            apStarBlu_lo,apStarBlu_hi,apStarGre_lo,apStarGre_hi,apStarRed_lo,apStarRed_hi = _apStarPixelLimits(dr=None)
            aspcapBlu_start,aspcapGre_start,aspcapRed_start,aspcapTotal = _aspcapPixelLimits(dr=None)
            # numpy.float was removed in NumPy 1.24; numpy.floating matches
            # all floating dtypes, which is the intent here (pad with NaN)
            if len(out.shape) == 2:
                newOut= numpy.zeros((aspcapTotal,out.shape[0]),dtype=out.dtype)
                if issubclass(out.dtype.type,numpy.floating): newOut+= numpy.nan
                out= out.T
            else:
                newOut= numpy.zeros(aspcapTotal,dtype=out.dtype)
                if issubclass(out.dtype.type,numpy.floating): newOut+= numpy.nan
            # Map the three apStar detector chunks onto the contiguous ASPCAP grid
            newOut[:aspcapGre_start]= out[apStarBlu_lo:apStarBlu_hi]
            newOut[aspcapGre_start:aspcapRed_start]= out[apStarGre_lo:apStarGre_hi]
            newOut[aspcapRed_start:]= out[apStarRed_lo:apStarRed_hi]
            if len(out.shape) == 2:
                out= newOut.T
            else:
                out= newOut
        if kwargs.get('header',True):
            return (out,hdr)
        else:
            return out
    return output_wrapper
def allStar(rmcommissioning=True,
            main=False,
            exclude_star_bad=False,
            exclude_star_warn=False,
            ak=True,
            akvers='targ',
            survey='all',
            rmnovisits=False,
            use_astroNN=False,
            use_astroNN_abundances=False,
            use_astroNN_distances=False,
            use_astroNN_ages=False,
            use_astroNN_orbits=False,
            adddist=False,
            distredux=None,
            rmdups=False,
            raw=False,
            mjd=58104,
            xmatch=None,
            test=False,
            dr=None,
            **kwargs):
    """
    NAME:
       allStar
    PURPOSE:
       read the allStar file
    INPUT:
       rmcommissioning= (default: True) if True, only use data obtained after commissioning
       main= (default: False) if True, only select stars in the main survey
       exclude_star_bad= (False) if True, remove stars with the STAR_BAD flag set in ASPCAPFLAG
       exclude_star_warn= (False) if True, remove stars with the STAR_WARN flag set in ASPCAPFLAG
       ak= (default: True) only use objects for which dereddened mags exist
       akvers= 'targ' (default) or 'wise': use target AK (AK_TARG) or AK derived from all-sky WISE (AK_WISE)
       survey= ('all') When reading an APOGEE-2 allStar file, select stars from both APOGEE-1 and -2 ('all'), just APOGEE-1 ('apogee1'), or just APOGEE-2 ('apogee-2') [Note: both 'apogee1' and 'apogee2' exclude stars from the APO-1m]
       rmnovisits= (False) if True, remove stars with no good visits (to go into the combined spectrum); shouldn't be necessary
       use_astroNN= (False) if True, swap in astroNN (Leung & Bovy 2019a) parameters (get placed in, e.g., TEFF and TEFF_ERR), astroNN distances (Leung & Bovy 2019b), and astroNN ages (Mackereth, Bovy, Leung, et al. 2019)
       use_astroNN_abundances= (False) only swap in astroNN parameters and abundances, not distances and ages
       use_astroNN_distances= (False) only swap in astroNN distances, not parameters and abundances and ages
       use_astroNN_ages= (False) only swap in astroNN ages, not parameters and abundances and distances
       use_astroNN_orbits= (False) only swap in orbits/Galactocentric coordinates
       adddist= (default: False) add distances (DR10/11 Hayden distances, DR12 combined distances)
       distredux= (default: DR default) reduction on which the distances are based
       rmdups= (False) if True, remove duplicates (very slow)
       raw= (False) if True, just return the raw file, read w/ fitsio
       mjd= (58104) MJD of version for monthly internal pipeline runs
       xmatch= (None) uses gaia_tools.xmatch.cds to x-match to an external catalog (eg., Gaia DR2 for xmatch='vizier:I/345/gaia2') and caches the result for re-use; requires jobovy/gaia_tools
       dr= data release to read; defaults to the MJD-versioned monthly run
        +gaia_tools.xmatch.cds keywords
    OUTPUT:
       allStar data[,xmatched table]
    HISTORY:
       2013-09-06 - Written - Bovy (IAS)
       2018-01-22 - Edited for new monthly pipeline runs - Bovy (UofT)
       2018-05-09 - Add xmatch - Bovy (UofT)
       2018-10-20 - Add use_astroNN option - Bovy (UofT)
       2018-02-15 - Add astroNN distances and corresponding options - Bovy (UofT)
       2018-02-16 - Add astroNN ages and corresponding options - Bovy (UofT)
       2019-08-13 - Edited for DR16 (incl. astroNN) - Bovy (UofT)
    """
    # Read the allStar file for the requested MJD (or DR), downloading if absent
    if dr is None:
        filePath= path.allStarPath(mjd=mjd)
        if not os.path.exists(filePath):
            download.allStar(mjd=mjd)
        #read allStar file
        data= fitsread(path.allStarPath(mjd=mjd))
    else:
        filePath= path.allStarPath(mjd=mjd, dr=dr)
        if not os.path.exists(filePath):
            download.allStar(mjd=mjd, dr=dr)
        #read allStar file
        data= fitsread(path.allStarPath(mjd=mjd, dr=dr))
    #Add astroNN? astroNN file matched line-by-line to allStar, so match here
    # [ages file not matched line-by-line in DR14]
    if use_astroNN or kwargs.get('astroNN',False) or use_astroNN_abundances:
        _warn_astroNN_abundances()
        astroNNdata= astroNN()
        data= _swap_in_astroNN(data,astroNNdata)
        del astroNNdata
    if use_astroNN or kwargs.get('astroNN',False) or use_astroNN_distances:
        _warn_astroNN_distances()
        astroNNdata= astroNNDistances()
        data= _add_astroNN_distances(data,astroNNdata)
        del astroNNdata
    if use_astroNN or kwargs.get('astroNN',False) or use_astroNN_ages:
        _warn_astroNN_ages()
        astroNNdata= astroNNAges()
        data= _add_astroNN_ages(data,astroNNdata)
        del astroNNdata
    if use_astroNN or kwargs.get('astroNN',False) or use_astroNN_orbits:
        _warn_astroNN_orbits()
        astroNNdata= astroNN()
        data= _add_astroNN_orbits(data,astroNNdata)
        del astroNNdata
    if raw: return data
    #Remove duplicates, cache
    if rmdups:
        dupsFilename= path.allStarPath(mjd=mjd).replace('.fits','-nodups.fits')
        #need to stop code from loading the cached duplicate free file, if crossmatching with astroNN results!
        if use_astroNN or kwargs.get('astroNN',False) or use_astroNN_abundances or use_astroNN_distances or use_astroNN_ages:
            astronn_used = True
        else:
            astronn_used = False
        if os.path.exists(dupsFilename) and not astronn_used:
            data= fitsread(dupsFilename)
        else:
            sys.stdout.write('\r'+"Removing duplicates (might take a while) and caching the duplicate-free file ... (file not cached if use_astroNN=True)\r")
            sys.stdout.flush()
            data= remove_duplicates(data)
            #Cache this file for subsequent use of rmdups (only if not astroNN!)
            if not astronn_used:
                fitswrite(dupsFilename,data,clobber=True)
            sys.stdout.write('\r'+_ERASESTR+'\r')
            sys.stdout.flush()
    if not xmatch is None:
        from gaia_tools.load import _xmatch_cds
        if rmdups:
            matchFilePath= dupsFilename
        else:
            matchFilePath= filePath
        if use_astroNN_ages:
            matchFilePath= matchFilePath.replace('rc-','rc-astroNN-ages-')
        # NOTE(review): matchFilePath is computed above but filePath is passed
        # here (it is the cache key for the x-match) — verify this is intended
        ma,mai= _xmatch_cds(data,xmatch,filePath,**kwargs)
        data= data[mai]
    #Some cuts
    if rmcommissioning:
        # Commissioning spectra have 'apogee.[ns].c' in their APSTAR_ID;
        # the except branch handles str (rather than bytes) entries
        try:
            indx= numpy.array(['apogee.n.c'.encode('utf-8') in s for s in data['APSTAR_ID']])
            indx+= numpy.array(['apogee.s.c'.encode('utf-8') in s for s in data['APSTAR_ID']])
        except TypeError:
            indx= numpy.array(['apogee.n.c' in s for s in data['APSTAR_ID']])
            indx+= numpy.array(['apogee.s.c' in s for s in data['APSTAR_ID']])
        data= data[True^indx]
        if not xmatch is None: ma= ma[True^indx]
    if rmnovisits:
        indx= numpy.array([s.strip() != '' for s in data['VISITS']])
        data= data[indx]
        if not xmatch is None: ma= ma[indx]
    if not survey.lower() == 'all' and 'SURVEY' in data.dtype.names:
        #get rid of any trailing whitespace...
        surv = numpy.array([data['SURVEY'][i].strip() for i in range(len(data['SURVEY']))])
        if not isinstance(surv[0], (bytes,numpy.bytes_)):
            surv = numpy.array([surv[i].encode('utf-8') for i in range(len(surv))])
        if survey.lower() == 'apogee1':
            indx = ((surv == b'apogee')
                    + (surv == b'apogee,apogee-marvels')
                    + (surv == b'apogee,apogee-marvels,apogee2')
                    + (surv == b'apogee,apogee-marvels,apogee2-manga')
                    + (surv == b'apogee,apogee2')
                    + (surv == b'apogee,apogee2,apogee2-manga')
                    + (surv == b'apogee,apogee2-manga')
                    + (surv == b'apogee-marvels')
                    + (surv == b'apogee-marvels,apogee2')
                    + (surv == b'apogee-marvels,apogee2-manga'))
        elif survey.lower() == 'apogee2':
            indx = ((surv == b'apogee2')
                    + (surv == b'apogee2-manga')
                    + (surv == b'manga-apogee2')
                    + (surv == b'apogee2,apogee2-manga')
                    + (surv == b'apogee2s'))
        data= data[indx]
        if not xmatch is None: ma= ma[indx]
    if test:
        return data
    if main:
        indx= mainIndx(data)
        data= data[indx]
        if not xmatch is None: ma= ma[indx]
    if akvers.lower() == 'targ':
        aktag= 'AK_TARG'
    elif akvers.lower() == 'wise':
        aktag= 'AK_WISE'
    # Only keep objects with a valid extinction estimate
    if ak:
        if not xmatch is None: ma= ma[True^numpy.isnan(data[aktag])]
        data= data[True^numpy.isnan(data[aktag])]
        if not xmatch is None: ma= ma[(data[aktag] > -50.)]
        data= data[(data[aktag] > -50.)]
    if exclude_star_bad:
        if not xmatch is None: ma= ma[(data['ASPCAPFLAG'] & 2**23) == 0]
        data= data[(data['ASPCAPFLAG'] & 2**23) == 0]
    if exclude_star_warn:
        if not xmatch is None: ma= ma[(data['ASPCAPFLAG'] & 2**7) == 0]
        data= data[(data['ASPCAPFLAG'] & 2**7) == 0]
    #Add dereddened J, H, and Ks
    aj= data[aktag]*2.5
    ah= data[aktag]*1.55
    if _ESUTIL_LOADED:
        data= esutil.numpy_util.add_fields(data,[('J0', float),
                                                 ('H0', float),
                                                 ('K0', float)])
        data['J0']= data['J']-aj
        data['H0']= data['H']-ah
        data['K0']= data['K']-data[aktag]
        # Flag entries without an extinction estimate as bad
        data['J0'][(data[aktag] <= -50.)]= -9999.9999
        data['H0'][(data[aktag] <= -50.)]= -9999.9999
        data['K0'][(data[aktag] <= -50.)]= -9999.9999
    else:
        warnings.warn("Extinction-corrected J,H,K not added because esutil is not installed",RuntimeWarning)
    #Add distances
    if adddist and _ESUTIL_LOADED:
        dist= fitsread(path.distPath(),1)
        h=esutil.htm.HTM()
        # Positional match (2 arcsec) between the distance and allStar catalogs
        m1,m2,d12 = h.match(dist['RA'],dist['DEC'],
                            data['RA'],data['DEC'],
                            2./3600.,maxmatch=1)
        data= data[m2]
        if not xmatch is None: ma= ma[m2]
        dist= dist[m1]
        distredux= path._redux_dr()
        if distredux.lower() == 'v302' or distredux.lower() == path._DR10REDUX:
            data= esutil.numpy_util.add_fields(data,[('DM05', float),
                                                     ('DM16', float),
                                                     ('DM50', float),
                                                     ('DM84', float),
                                                     ('DM95', float),
                                                     ('DMPEAK', float),
                                                     ('DMAVG', float),
                                                     ('SIG_DM', float),
                                                     ('DIST_SOL', float),
                                                     ('SIG_DISTSOL', float)])
            data['DM05']= dist['DM05']
            data['DM16']= dist['DM16']
            data['DM50']= dist['DM50']
            data['DM84']= dist['DM84']
            data['DM95']= dist['DM95']
            data['DMPEAK']= dist['DMPEAK']
            data['DMAVG']= dist['DMAVG']
            data['SIG_DM']= dist['SIG_DM']
            data['DIST_SOL']= dist['DIST_SOL']/1000.
            data['SIG_DISTSOL']= dist['SIG_DISTSOL']/1000.
        elif distredux.lower() == path._DR11REDUX:
            data= esutil.numpy_util.add_fields(data,[('DISO', float),
                                                     ('DMASS', float),
                                                     ('DISO_GAL', float),
                                                     ('DMASS_GAL', float)])
            data['DISO']= dist['DISO'][:,1]
            data['DMASS']= dist['DMASS'][:,1]
            data['DISO_GAL']= dist['DISO_GAL'][:,1]
            data['DMASS_GAL']= dist['DMASS_GAL'][:,1]
        elif distredux.lower() == path._DR12REDUX:
            data= esutil.numpy_util.add_fields(data,[('HIP_PLX', float),
                                                     ('HIP_E_PLX', float),
                                                     ('RC_DIST', float),
                                                     ('APOKASC_DIST_DIRECT', float),
                                                     ('BPG_DIST1_MEAN', float),
                                                     ('HAYDEN_DIST_PEAK', float),
                                                     ('SCHULTHEIS_DIST', float)])
            data['HIP_PLX']= dist['HIP_PLX']
            data['HIP_E_PLX']= dist['HIP_E_PLX']
            data['RC_DIST']= dist['RC_dist_pc']
            data['APOKASC_DIST_DIRECT']= dist['APOKASC_dist_direct_pc']/1000.
            data['BPG_DIST1_MEAN']= dist['BPG_dist1_mean']
            data['HAYDEN_DIST_PEAK']= 10.**(dist['HAYDEN_distmod_PEAK']/5.-2.)
            data['SCHULTHEIS_DIST']= dist['SCHULTHEIS_dist']
    elif adddist:
        warnings.warn("Distances not added because matching requires the uninstalled esutil module",RuntimeWarning)
    # For recent reductions, expose the PARAM metallicity/alpha as flat fields
    if _ESUTIL_LOADED and (path._APOGEE_REDUX.lower() == 'current' \
                               or 'l3' in path._APOGEE_REDUX.lower() \
                               or int(path._APOGEE_REDUX[1:]) > 600):
        data= esutil.numpy_util.add_fields(data,[('METALS', float),
                                                 ('ALPHAFE', float)])
        data['METALS']= data['PARAM'][:,paramIndx('metals')]
        data['ALPHAFE']= data['PARAM'][:,paramIndx('alpha')]
    if not xmatch is None:
        return (data,ma)
    else:
        return data
def allVisit(rmcommissioning=True,
             main=False,
             ak=True,
             akvers='targ',
             plateInt=False,
             plateS4=False,
             mjd=58104,
             raw=False):
    """
    NAME:
       allVisit
    PURPOSE:
       read the allVisit file
    INPUT:
       rmcommissioning= (default: True) if True, only use data obtained after commissioning
       main= (default: False) if True, only select stars in the main survey
       ak= (default: True) only use objects for which dereddened mags exist
       akvers= 'targ' (default) or 'wise': use target AK (AK_TARG) or AK derived from all-sky WISE (AK_WISE)
       plateInt= (False) if True, cast plate as an integer and give special plates -1
       plateS4= (False) if True, cast plate as four character string
       mjd= (58104) MJD of version for monthly internal pipeline runs
       raw= (False) if True, just return the raw file, read w/ fitsio
    OUTPUT:
       allVisit data
    HISTORY:
       2013-11-07 - Written - Bovy (IAS)
       2018-02-28 - Edited for new monthly pipeline runs - Bovy (UofT)
    """
    filePath= path.allVisitPath(mjd=mjd)
    if not os.path.exists(filePath):
        download.allVisit(mjd=mjd)
    #read allVisit file
    data= fitsread(path.allVisitPath(mjd=mjd))
    if raw: return data
    #Some cuts
    if rmcommissioning:
        # Commissioning visits have 'apogee.[ns].c' in their VISIT_ID;
        # the except branch handles str (rather than bytes) entries
        try:
            indx= numpy.array(['apogee.n.c'.encode('utf-8') in s for s in data['VISIT_ID']])
            indx+= numpy.array(['apogee.s.c'.encode('utf-8') in s for s in data['VISIT_ID']])
        except TypeError:
            indx= numpy.array(['apogee.n.c' in s for s in data['VISIT_ID']])
            indx+= numpy.array(['apogee.s.c' in s for s in data['VISIT_ID']])
        data= data[True^indx]
    if main:
        indx= mainIndx(data)
        data= data[indx]
    if akvers.lower() == 'targ':
        aktag= 'AK_TARG'
    elif akvers.lower() == 'wise':
        aktag= 'AK_WISE'
    # Only keep visits with a valid extinction estimate
    if ak:
        data= data[True^numpy.isnan(data[aktag])]
        data= data[(data[aktag] > -50.)]
    if plateInt or plateS4:
        #If plate is a string, cast it as an integer
        if isinstance(data['PLATE'][0],(bytes,str)):
            #First cast the special plates as -1
            plateDtype= data['PLATE'].dtype
            data['PLATE'][data['PLATE'] == 'calibration'.ljust(int(str(plateDtype)[2:]))]= '-1'
            data['PLATE'][data['PLATE'] == 'hip'.ljust(int(str(plateDtype)[2:]))]= '-1'
            data['PLATE'][data['PLATE'] == 'misc'.ljust(int(str(plateDtype)[2:]))]= '-1'
            data['PLATE'][data['PLATE'] == 'moving_groups'.ljust(int(str(plateDtype)[2:]))]= -1
            data['PLATE'][data['PLATE'] == 'rrlyr'.ljust(int(str(plateDtype)[2:]))]= '-1'
            #Now change the dtype to make plate an int
            dt= data.dtype
            dt= dt.descr
            try:
                plateDtypeIndx= dt.index(('PLATE', '|S13'))
            except ValueError: #PLATE column is not string - try U
                plateDtypeIndx = dt.index(('PLATE', '<U13'))
            if plateInt:
                dt[plateDtypeIndx]= (dt[plateDtypeIndx][0],'int')
                dt= numpy.dtype(dt)
                data= data.astype(dt)
        # If we want the plate as a S4 string...
        # NOTE(review): if PLATE was not a string above, dt/plateDtypeIndx are
        # undefined here and this branch would raise NameError — verify
        if plateS4:
            #go to int first, as this field is formatted differently in diff releases...
            dt[plateDtypeIndx]= (dt[plateDtypeIndx][0],'int')
            dt= numpy.dtype(dt)
            data= data.astype(dt)
            dt= data.dtype
            dt= dt.descr
            plateDtypeIndx= dt.index(('PLATE', '<i8'))
            dt[plateDtypeIndx]= (dt[plateDtypeIndx][0],'|S5')
            dt= numpy.dtype(dt)
            data= data.astype(dt)
    #Add dereddened J, H, and Ks
    aj= data[aktag]*2.5
    ah= data[aktag]*1.55
    if _ESUTIL_LOADED:
        data= esutil.numpy_util.add_fields(data,[('J0', float),
                                                 ('H0', float),
                                                 ('K0', float)])
        data['J0']= data['J']-aj
        data['H0']= data['H']-ah
        data['K0']= data['K']-data[aktag]
        # Flag entries without an extinction estimate as bad
        data['J0'][(data[aktag] <= -50.)]= -9999.9999
        data['H0'][(data[aktag] <= -50.)]= -9999.9999
        data['K0'][(data[aktag] <= -50.)]= -9999.9999
    else:
        warnings.warn("Extinction-corrected J,H,K not added because esutil is not installed",RuntimeWarning)
    return data
def apokasc(rmcommissioning=True,
            main=False):
    """
    NAME:
       apokasc
    PURPOSE:
       read the APOKASC data
    INPUT:
       rmcommissioning= (default: True) if True, only use data obtained after commissioning
       main= (default: False) if True, only select stars in the main survey
    OUTPUT:
       APOKASC data
    HISTORY:
       2013-10-01 - Written - Bovy (IAS)
    """
    if not _ESUTIL_LOADED:
        raise ImportError("apogee.tools.read.apokasc function requires the esutil module for catalog matching")
    #read allStar file
    data= allStar(rmcommissioning=rmcommissioning,main=main,adddist=False,
                  rmdups=False)
    #read the APOKASC file
    kascdata= fitsread(path.apokascPath())
    #Match these two
    # Positional match (2 arcsec, closest match only) between APOKASC and allStar
    h=esutil.htm.HTM()
    m1,m2,d12 = h.match(kascdata['RA'],kascdata['DEC'],
                        data['RA'],data['DEC'],
                        2./3600.,maxmatch=1)
    data= data[m2]
    kascdata= kascdata[m1]
    # Copy the matched allStar quantities onto the APOKASC records
    kascdata= esutil.numpy_util.add_fields(kascdata,[('J0', float),
                                                     ('H0', float),
                                                     ('K0', float),
                                                     ('APOGEE_TARGET1','>i4'),
                                                     ('APOGEE_TARGET2','>i4'),
                                                     ('APOGEE_ID', 'S18'),
                                                     ('LOGG', float),
                                                     ('TEFF', float),
                                                     ('METALS', float),
                                                     ('ALPHAFE', float),
                                                     ('FNFE', float),
                                                     ('FCFE', float)])
    kascdata['J0']= data['J0']
    kascdata['H0']= data['H0']
    kascdata['K0']= data['K0']
    kascdata['APOGEE_ID']= data['APOGEE_ID']
    kascdata['APOGEE_TARGET1']= data['APOGEE_TARGET1']
    kascdata['APOGEE_TARGET2']= data['APOGEE_TARGET2']
    kascdata['LOGG']= data['LOGG']
    kascdata['TEFF']= data['TEFF']
    kascdata['METALS']= data['METALS']
    kascdata['ALPHAFE']= data['ALPHAFE']
    # FPARAM columns 5 and 4 are the raw [N/Fe] and [C/Fe] fits
    kascdata['FNFE']= data['FPARAM'][:,5]
    kascdata['FCFE']= data['FPARAM'][:,4]
    return kascdata
def rcsample(main=False,dr=None,xmatch=None,
             use_astroNN=False,use_astroNN_abundances=False,
             use_astroNN_distances=False,use_astroNN_ages=False,
             use_astroNN_orbits=False,
             **kwargs):
    """
    NAME:
       rcsample
    PURPOSE:
       read the rcsample file
    INPUT:
       main= (default: False) if True, only select stars in the main survey
       dr= data reduction to load the catalog for (automatically set based on APOGEE_REDUX if not given explicitly)
       xmatch= (None) uses gaia_tools.xmatch.cds to x-match to an external catalog (eg., Gaia DR2 for xmatch='vizier:I/345/gaia2') and caches the result for re-use; requires jobovy/gaia_tools
       use_astroNN= (False) if True, swap in astroNN (Leung & Bovy 2019a) parameters (get placed in, e.g., TEFF and TEFF_ERR), astroNN distances (Leung & Bovy 2019b), and astroNN ages (Mackereth, Bovy, Leung, et al. (2019)
       use_astroNN_abundances= (False) only swap in astroNN parameters and abundances, not distances and ages
       use_astroNN_distances= (False) only swap in astroNN distances, not parameters and abundances and ages
       use_astroNN_ages= (False) only swap in astroNN ages, not parameters and abundances and distances
       use_astroNN_orbits= (False) only swap in astroNN orbit info and Galactocentric coordinates
        +gaia_tools.xmatch.cds keywords
    OUTPUT:
       rcsample data[,xmatched table]
    HISTORY:
       2013-10-08 - Written - Bovy (IAS)
       2018-05-09 - Add xmatch - Bovy (UofT)
       2018-10-20 - Added use_astroNN - Bovy (UofT)
       2018-02-15 - Add astroNN distances and corresponding options - Bovy (UofT)
       2018-02-16 - Add astroNN ages and corresponding options - Bovy (UofT)
    """
    if dr is None: dr= path._default_dr()
    filePath= path.rcsamplePath(dr=dr)
    if not os.path.exists(filePath):
        download.rcsample(dr=dr)
    #read rcsample file
    data= fitsread(path.rcsamplePath(dr=dr))
    # Swap in astroNN results?
    # Unlike allStar, the astroNN catalogs are not row-matched to the RC
    # sample, so each swap-in requires a positional (ra,dec) cross-match
    if use_astroNN or kwargs.get('astroNN',False) or use_astroNN_abundances:
        _warn_astroNN_abundances()
        astroNNdata= astroNN()
        # Match on (ra,dec)
        m1,m2,_= _xmatch(data,astroNNdata,maxdist=2.,
                         colRA1='RA',colDec1='DEC',
                         colRA2='RA' if int(dr) < 16 else 'ra_apogee',
                         colDec2='DEC' if int(dr) < 16 else 'dec_apogee')
        data= data[m1]
        astroNNdata= astroNNdata[m2]
        data= _swap_in_astroNN(data,astroNNdata)
    if use_astroNN or kwargs.get('astroNN',False) or use_astroNN_distances:
        _warn_astroNN_distances()
        astroNNdata= astroNNDistances()
        # Match on (ra,dec)
        m1,m2,_= _xmatch(data,astroNNdata,maxdist=2.,
                         colRA1='RA',colDec1='DEC', colRA2='ra_apogee', colDec2='dec_apogee')
        data= data[m1]
        astroNNdata= astroNNdata[m2]
        data= _add_astroNN_distances(data,astroNNdata)
    if use_astroNN or kwargs.get('astroNN',False) or use_astroNN_ages:
        _warn_astroNN_ages()
        # Match on (ra,dec)
        if int(dr) < 16:
            astroNNdata= astroNNAges()
            data = _add_astroNN_ages(data,astroNNdata)
        else:
            astroNNdata = astroNN() #ages are in the main VAC for DR > 16
            m1,m2,_= _xmatch(data,astroNNdata,maxdist=2.,
                             colRA1='RA',colDec1='DEC',
                             colRA2='ra_apogee',
                             colDec2='dec_apogee')
            data= data[m1]
            astroNNdata= astroNNdata[m2]
            data= _add_astroNN_ages(data,astroNNdata)
    if use_astroNN or kwargs.get('astroNN', False) or use_astroNN_orbits:
        if int(dr) < 16:
            # no orbits in < 16 - so skip this step.
            pass
        else:
            #orbits are in the main VAC for DR > 16
            _warn_astroNN_orbits()
            astroNNdata= astroNN()
            #need to adjust the GALVR,T,Z names in the rc catalogs
            data = numpy.lib.recfunctions.rename_fields(data, {'GALVR': 'RC_GALVR', 'GALVT': 'RC_GALVT', 'GALVZ': 'RC_GALVZ'})
            # Match on (ra,dec)
            m1,m2,_= _xmatch(data,astroNNdata,maxdist=2.,
                             colRA1='RA',colDec1='DEC',
                             colRA2='RA' if int(dr) < 16 else 'ra_apogee',
                             colDec2='DEC' if int(dr) < 16 else 'dec_apogee')
            data= data[m1]
            astroNNdata= astroNNdata[m2]
            data= _add_astroNN_orbits(data,astroNNdata)
    if not xmatch is None:
        from gaia_tools.load import _xmatch_cds
        # The x-match cache file name encodes which astroNN products were used
        if use_astroNN or kwargs.get('astroNN',False):
            matchFilePath= filePath.replace('rc-','rc-astroNN-')
        elif use_astroNN_abundances:
            matchFilePath= filePath.replace('rc-','rc-astroNN-abundances-')
        elif use_astroNN_distances:
            matchFilePath= filePath.replace('rc-','rc-astroNN-distances-')
        elif use_astroNN_ages:
            matchFilePath= filePath.replace('rc-','rc-astroNN-ages-')
        else:
            matchFilePath= filePath
        ma,mai= _xmatch_cds(data,xmatch,matchFilePath,**kwargs)
        data= data[mai]
    #Some cuts
    if main:
        indx= mainIndx(data)
        data= data[indx]
        if not xmatch is None: ma= ma[indx]
    if not xmatch is None:
        return (data,ma)
    else:
        return data
def astroNN(dr=None):
    """
    NAME:
       astroNN
    PURPOSE:
       read the astroNN file
    INPUT:
       dr= data reduction to load the catalog for (automatically set based on APOGEE_REDUX if not given explicitly)
    OUTPUT:
       astroNN data
    HISTORY:
       2018-10-20 - Written - Bovy (UofT)
       2019-08-13 - Edited for DR16 - Bovy (UofT)
    """
    catalogPath= path.astroNNPath(dr=dr)
    # Fetch the catalog first if it is not present locally
    if not os.path.exists(catalogPath):
        download.astroNN(dr=dr)
    #read astroNN file
    return fitsread(path.astroNNPath(dr=dr))
def astroNNDistances(dr=None):
    """
    NAME:
       astroNNDistances
    PURPOSE:
       read the astroNN distances file
    INPUT:
       dr= data reduction to load the catalog for (automatically set based on APOGEE_REDUX if not given explicitly)
    OUTPUT:
       astroNN distances data
    HISTORY:
       2018-02-15 - Written - Bovy (UofT)
       2019-08-13 - Edited for DR16 - Bovy (UofT)
    """
    # Consistent with astroNN(): compute the path once, download on demand
    filePath= path.astroNNDistancesPath(dr=dr)
    if not os.path.exists(filePath):
        download.astroNNDistances(dr=dr)
    #read astroNN file
    return fitsread(filePath)
def astroNNAges(dr=None):
    """
    NAME:
       astroNNAges
    PURPOSE:
       read the astroNN ages file
    INPUT:
       dr= data reduction to load the catalog for (automatically set based on APOGEE_REDUX if not given explicitly)
    OUTPUT:
       astroNN ages data
    HISTORY:
       2018-02-16 - Written - Bovy (UofT)
       2019-08-13 - Edited for DR16 - Bovy (UofT)
    """
    # Consistent with astroNN(): compute the path once, download on demand
    filePath= path.astroNNAgesPath(dr=dr)
    if not os.path.exists(filePath):
        download.astroNNAges(dr=dr)
    #read astroNN file
    return fitsread(filePath)
def obslog(year=None, hemisphere=None):
    """
    NAME:
       obslog
    PURPOSE:
       read the observation summary up to a certain year
    INPUT:
       year= read up to this year (None)
       hemisphere= hemisphere of the observation log to read (None)
    OUTPUT:
       observation log
    HISTORY:
       2013-11-04 - Written - Bovy (IAS)
    """
    obslogfilename= path.obslogPath(year=year, hemisphere=hemisphere)
    if not os.path.exists(obslogfilename):
        download.obslog(year=year, hemisphere=hemisphere)
    # Pick the default year for the current data release if not given
    if year is None:
        if path._default_dr() == '11':
            year= 2
        elif path._default_dr() == '12' or path._default_dr() == '13':
            year= 3
        elif path._default_dr() == '14':
            year= 5
        elif path._default_dr() == '16':
            year= 7
        else: raise IOError('No default year available for DR{}, need to set it by hand'.format(path._default_dr()))
    # The log's column layout changed after year 3
    if year > 3:
        genfromtxtKwargs= {'delimiter':', ',
                           'dtype':[('Plate','int'),
                                    ('LocID','int'),
                                    ('ra','float'),
                                    ('dec','float'),
                                    ('A_ver','S14'),
                                    ('NObs_Ver_Done','int'),
                                    ('NObs_Ver_Plan','int'),
                                    ('Total_SN','float'),
                                    ('ObsHistory','S50')],
                           'skip_footer':0}
    else:
        genfromtxtKwargs= {'delimiter':'|',
                           'dtype':[('Fieldname','S14'),
                                    ('LocID','int'),
                                    ('ra','float'),
                                    ('dec','float'),
                                    ('Plate','int'),
                                    ('A_ver','S14'),
                                    ('DrilledHA','float'),
                                    ('HDB','int'),
                                    ('NObs_Plan','int'),
                                    ('NObs_Done','int'),
                                    ('NObs_Ver_Plan','int'),
                                    ('NObs_Ver_Done','int'),
                                    ('Total_SN','float'),
                                    ('Red_SN','float'),
                                    ('ManPriority','int'),
                                    ('Priority','float'),
                                    ('Time','float'),
                                    ('Shared','int'),
                                    ('Stars','int'),
                                    ('At_APO','int'),
                                    ('Reduction','int'),
                                    ('ObsHistory','S50'),
                                    ('UNKNOWN','S50'),
                                    ('UNKNOWN1','int'),
                                    ('UNKNOWN2','int'),
                                    ('ReductionHistory','S50')],
                           'skip_footer':1}
    # numpy renamed genfromtxt's 'skiprows' keyword to 'skip_header' in 1.10;
    # compare (major, minor) as a tuple: the old check compared major and
    # minor independently, which misclassified e.g. numpy 2.0 (minor 0 < 10)
    # and would pass the long-removed 'skiprows' keyword
    if tuple(int(v) for v in numpy.__version__.split('.')[:2]) < (1, 10):
        genfromtxtKwargs['skiprows']= 1+(year<4)
    else:
        genfromtxtKwargs['skip_header']= 1+(year<4)
    obslogtxt= numpy.genfromtxt(obslogfilename,**genfromtxtKwargs)
    return obslogtxt
def apogeePlate(dr=None, stdize=False):
    """
    NAME:
       apogeePlate
    PURPOSE:
       read the apogeePlate file
    INPUT:
       dr= return the file corresponding to this data release
       stdize= if True, convert a new-style plate file to the old apogeePlate layout
    OUTPUT:
       apogeePlate file
    HISTORY:
       2013-11-04 - Written - Bovy (IAS)
    """
    filePath= path.apogeePlatePath(dr=dr)
    if not os.path.exists(filePath):
        download.apogeePlate(dr=dr)
    out = fitsread(filePath)
    if stdize and 'PLATE_ID' not in out.dtype.fields: #is new version file -> need to convert to more apogeePlate like
        out= numpy.asarray(out) # makes sure that FITS_REC --> recarray for apy
        # Rename new-style fields to the old apogeePlate names
        names= list(out.dtype.names)
        names[names.index('PLATE')]= 'PLATE_ID'
        names[names.index('DESIGNID')] = 'DESIGN_ID'
        out.dtype.names= names
        # Collapse to one record per unique plate
        plateids = numpy.unique(out['PLATE_ID'])
        nurec = numpy.recarray(len(plateids), dtype=[('LOCATION_ID', '>i4'),
                                                     ('PLATE_ID', '>i4'),
                                                     ('DESIGN_ID', '>i4'),
                                                     ('FIELD_NAME', '<U16')])
        for i, plateid in enumerate(plateids):
            entries = out[out['PLATE_ID'] == plateid]
            nurec['PLATE_ID'][i] = plateid
            nurec['DESIGN_ID'][i] = numpy.unique(entries['DESIGN_ID'])
            nurec['FIELD_NAME'][i] = entries['NAME'][0] #this isnt so essential?
            nurec['LOCATION_ID'][i] = numpy.unique(entries['LOCATION_ID'])
        out = nurec
    return out
def apogeeDesign(dr=None,ap1ize=False):
    """
    NAME:
       apogeeDesign
    PURPOSE:
       read the apogeeDesign file
    INPUT:
       dr= return the file corresponding to this data release
       ap1ize= (False) if True and DR >= 14: adjust tags to match APOGEE-1 more
    OUTPUT:
       apogeeDesign file
    HISTORY:
       2013-11-04 - Written - Bovy (IAS)
    """
    filePath= path.apogeeDesignPath(dr=dr)
    if not os.path.exists(filePath):
        download.apogeeDesign(dr=dr)
    out= fitsread(filePath)
    if ap1ize and 'COHORT_SHORT_VERSION' in out.dtype.fields:
        out= numpy.asarray(out) # makes sure that FITS_REC --> recarray for apy
        # Rename the DR>=14-style cohort fields to their APOGEE-1 names
        names= list(out.dtype.names)
        names[names.index('COHORT_SHORT_VERSION')]= 'SHORT_COHORT_VERSION'
        names[names.index('COHORT_MEDIUM_VERSION')]= 'MEDIUM_COHORT_VERSION'
        names[names.index('COHORT_LONG_VERSION')]= 'LONG_COHORT_VERSION'
        out.dtype.names= names
        # Flatten the 3-column COHORT_MIN/MAX_H arrays into per-cohort fields
        out= esutil.numpy_util.add_fields(out,[('SHORT_COHORT_MIN_H', float),
                                               ('SHORT_COHORT_MAX_H', float),
                                               ('MEDIUM_COHORT_MIN_H', float),
                                               ('MEDIUM_COHORT_MAX_H', float),
                                               ('LONG_COHORT_MIN_H', float),
                                               ('LONG_COHORT_MAX_H', float)])
        out['SHORT_COHORT_MIN_H']= out['COHORT_MIN_H'][:,0]
        out['SHORT_COHORT_MAX_H']= out['COHORT_MAX_H'][:,0]
        out['MEDIUM_COHORT_MIN_H']= out['COHORT_MIN_H'][:,1]
        out['MEDIUM_COHORT_MAX_H']= out['COHORT_MAX_H'][:,1]
        out['LONG_COHORT_MIN_H']= out['COHORT_MIN_H'][:,2]
        out['LONG_COHORT_MAX_H']= out['COHORT_MAX_H'][:,2]
    return out
def apogeeField(dr=None):
    """
    NAME:
       apogeeField
    PURPOSE:
       read the apogeeField file
    INPUT:
       dr= return the file corresponding to this data release
    OUTPUT:
       apogeeField file
    HISTORY:
       2013-11-04 - Written - Bovy (IAS)
    """
    # Download the file on first use, then read it
    fieldFile= path.apogeeFieldPath(dr=dr)
    if not os.path.exists(fieldFile):
        download.apogeeField(dr=dr)
    return fitsread(fieldFile)
def apogeeObject(field_name,dr=None,
                 ak=True,
                 akvers='targ'):
    """
    NAME:
       apogeeObject
    PURPOSE:
       read the apogeeObject file for a given field
    INPUT:
       field_name - name of the field
       dr= return the file corresponding to this data release
       ak= (default: True) only use objects for which dereddened mags exist
       akvers= 'targ' (default) or 'wise': use target AK (AK_TARG) or AK derived from all-sky WISE (AK_WISE)
    OUTPUT:
       apogeeObject file
    HISTORY:
       2013-11-04 - Written - Bovy (IAS)
    """
    filePath= path.apogeeObjectPath(field_name,dr=dr)
    if not os.path.exists(filePath):
        download.apogeeObject(field_name,dr=dr)
    data= fitsread(filePath)
    if akvers.lower() == 'targ':
        aktag= 'AK_TARG'
    elif akvers.lower() == 'wise':
        aktag= 'AK_WISE'
    else:
        # BUG FIX: previously any other akvers left aktag unbound and the
        # function died later with an UnboundLocalError
        raise ValueError("akvers must be either 'targ' or 'wise'")
    if ak:
        # keep only objects with a finite, non-sentinel extinction value
        data= data[True^numpy.isnan(data[aktag])]
        data= data[(data[aktag] > -50.)]
    #Add dereddened J, H, and Ks
    aj= data[aktag]*2.5
    ah= data[aktag]*1.55
    if _ESUTIL_LOADED:
        data= esutil.numpy_util.add_fields(data,[('J0', float),
                                                 ('H0', float),
                                                 ('K0', float)])
        data['J0']= data['J']-aj
        data['H0']= data['H']-ah
        data['K0']= data['K']-data[aktag]
        # objects with sentinel extinction get sentinel dereddened mags
        data['J0'][(data[aktag] <= -50.)]= -9999.9999
        data['H0'][(data[aktag] <= -50.)]= -9999.9999
        data['K0'][(data[aktag] <= -50.)]= -9999.9999
    else:
        warnings.warn("Extinction-corrected J,H,K not added because esutil is not installed",RuntimeWarning)
    return data
@specOnAspcapWavegrid
def aspcapStar(loc_id,apogee_id,telescope='apo25m',ext=1,dr=None,header=True,
               aspcapWavegrid=False):
    """
    NAME:
       aspcapStar
    PURPOSE:
       Read an aspcapStar file for a given star
    INPUT:
       loc_id - location ID (field for 1m targets or after DR14)
       apogee_id - APOGEE ID of the star
       telescope= telescope used ('apo25m' [default], 'apo1m', 'lco25m')
       ext= (1) extension to load
       header= (True) if True, also return the header
       dr= return the path corresponding to this data release (general default)
       aspcapWavegrid= (False) if True, output the spectrum on the ASPCAP
                       wavelength grid
    OUTPUT:
       aspcapStar file or (aspcapStar file, header)
    HISTORY:
       2014-11-25 - Written - Bovy (IAS)
       2018-01-22 - Edited for new post-DR14 path structure - Bovy (UofT)
    """
    filePath= path.aspcapStarPath(loc_id,apogee_id,dr=dr,telescope=telescope)
    if not os.path.exists(filePath):
        download.aspcapStar(loc_id,apogee_id,dr=dr,telescope=telescope)
    if _FITSIO_LOADED:
        return fitsread(filePath,ext,header=header)
    # astropy backend: the header has to be read with a separate call
    if header:
        return fitsread(filePath,ext), headerread(filePath,ext)
    return fitsread(filePath,ext)
@specOnAspcapWavegrid
def apStar(loc_id,apogee_id,telescope='apo25m',
           ext=1,dr=None,header=True,aspcapWavegrid=False):
    """
    NAME:
       apStar
    PURPOSE:
       Read an apStar file for a given star
    INPUT:
       loc_id - location ID (field for 1m targets or after DR14)
       apogee_id - APOGEE ID of the star
       telescope= telescope used ('apo25m' [default], 'apo1m', 'lco25m')
       ext= (1) extension to load
       header= (True) if True, also return the header
       dr= return the path corresponding to this data release (general default)
       aspcapWavegrid= (False) if True, output the spectrum on the ASPCAP
                       wavelength grid
    OUTPUT:
       apStar file or (apStar file, header)
    HISTORY:
       2015-01-13 - Written - Bovy (IAS)
       2018-01-22 - Edited for new post-DR14 path structure - Bovy (UofT)
    """
    filePath= path.apStarPath(loc_id,apogee_id,dr=dr,telescope=telescope)
    if not os.path.exists(filePath):
        download.apStar(loc_id,apogee_id,dr=dr,telescope=telescope)
    if _FITSIO_LOADED:
        return fitsread(filePath,ext,header=header)
    # astropy backend: the header has to be read with a separate call
    if header:
        return fitsread(filePath,ext), headerread(filePath,ext)
    return fitsread(filePath,ext)
def apVisit(plateid, mjd, fiberid, ext=1, telescope='apo25m',
            dr=None, header=False):
    """
    NAME: apVisit
    PURPOSE: Read a single apVisit file for a given plate, MJD, and fiber
    INPUT:
       plateid = 4-digit plate ID (field for 1m targets), float
       mjd = 5-digit MJD, float
       fiberid = 3-digit fiber ID, float
       ext = (1) extension to load
       header = (False) if True, return ONLY the header for the specified extension
       telescope= ('apo25m') Telescope at which this plate has been observed ('apo25m' for standard APOGEE-N, 'apo1m' for the 1m telescope)
       dr = return the path corresponding to this data release (general default)
    OUTPUT:
       header=False:
          1D array with apVisit fluxes (ext=1), or
          1D array with apVisit flux errors (ext=2), or
          1D wavelength grid (ext=4) **WARNING** SORTED FROM HIGH TO LOW WAVELENGTH !!!
          etc.
          go here to learn about other extension choices:
          https://data.sdss.org/datamodel/files/APOGEE_REDUX/APRED_VERS/TELESCOPE/PLATE_ID/MJD5/apVisit.html
       header=True:
          header for the specified extension only (see link above)
    HISTORY: 2016-11 - added by <NAME>
             2019-01 - long overdue plateid vs. locid bugfix
                       added readheader function which doesn't fail for ext=0
             2019-01-28 - Added telescope keyword - Bovy (UofT)
    TODO: automatically find all apVisit files for a given apogee ID and download them
    """
    filePath = path.apVisitPath(plateid, mjd, fiberid,
                                telescope=telescope,dr=dr)
    if not os.path.exists(filePath):
        download.apVisit(plateid, mjd, fiberid, telescope=telescope,dr=dr)
    if header:
        return headerread(filePath, ext)
    # stitch the three detector chips together in increasing wavelength order
    return numpy.flipud(fitsread(filePath, ext).flatten())
@modelspecOnApStarWavegrid
def modelSpec(lib='GK',teff=4500,logg=2.5,metals=0.,
              cfe=0.,nfe=0.,afe=0.,vmicro=2.,
              dr=None,header=True,ext=234,apStarWavegrid=None,**kwargs):
    """
    NAME:
       modelSpec
    PURPOSE:
       Read a model spectrum file
    INPUT:
       lib= ('GK') spectral library ('GK' or 'F')
       teff= (4500) grid-point Teff
       logg= (2.5) grid-point logg
       metals= (0.) grid-point metallicity
       cfe= (0.) grid-point carbon-enhancement
       nfe= (0.) grid-point nitrogen-enhancement
       afe= (0.) grid-point alpha-enhancement
       vmicro= (2.) grid-point microturbulence
       dr= return the path corresponding to this data release
       ext= (234) extension to load (if ext=234, the blue, green, and red spectra will be combined [onto the aspcapStar wavelength grid by default, just concatenated if apStarWavegrid=False), with NaN where there is no model)
       apStarWavegrid= (True) if False and ext=234, don't put the spectrum on the apStar wavelength grid, but just concatenate the blue, green, and red detector
       header= (True) if True, also return the header (not for ext=234)
       dr= return the path corresponding to this data release (general default)
       +download kwargs
    OUTPUT:
       model spectrum or (model spectrum file, header)
    RAISES:
       ValueError for data releases or libraries without a known grid
    HISTORY:
       2015-01-13 - Written - Bovy (IAS)
       2018-02-05 - Updated to account for changing detector ranges - Price-Jones (UofT)
    """
    filePath= path.modelSpecPath(lib=lib,teff=teff,logg=logg,metals=metals,
                                 cfe=cfe,nfe=nfe,afe=afe,vmicro=vmicro,dr=dr)
    if not os.path.exists(filePath):
        download.modelSpec(lib=lib,teff=teff,logg=logg,metals=metals,
                           cfe=cfe,nfe=nfe,afe=afe,vmicro=vmicro,dr=dr,
                           **kwargs)
    # Need to use astropy's fits reader, bc the file has issues
    import astropy.io.fits as apyfits
    from astropy.utils.exceptions import AstropyUserWarning
    import warnings
    warnings.filterwarnings('ignore',category=AstropyUserWarning)
    hdulist= apyfits.open(filePath)
    # Find index of nearest grid point in Teff, logg, and metals
    if dr is None: dr= path._default_dr()
    # BUG FIX: only the DR12 grid layout is implemented; previously any other
    # dr fell through to a NameError on logggrid. str(dr) also accepts dr=12.
    if str(dr) != '12':
        raise ValueError("modelSpec grid lookup is only implemented for dr='12'")
    logggrid= numpy.linspace(0.,5.,11)
    metalsgrid= numpy.linspace(-2.5,0.5,7)
    if lib.lower() == 'gk':
        teffgrid= numpy.linspace(3500.,6000.,11)
    elif lib.lower() == 'f':
        teffgrid= numpy.linspace(5500.,8000.,11)
    else:
        # BUG FIX: previously an unknown lib fell through to a NameError
        raise ValueError("lib must be 'GK' or 'F'")
    teffIndx= numpy.argmin(numpy.fabs(teff-teffgrid))
    loggIndx= numpy.argmin(numpy.fabs(logg-logggrid))
    metalsIndx= numpy.argmin(numpy.fabs(metals-metalsgrid))
    if header and not ext == 234:
        return (hdulist[ext].data[metalsIndx,loggIndx,teffIndx],
                hdulist[ext].header)
    elif not ext == 234:
        return hdulist[ext].data[metalsIndx,loggIndx,teffIndx]
    else: #ext == 234, combine 2,3,4 (blue, green, red detector)
        aspcapBlu_start,aspcapGre_start,aspcapRed_start,aspcapTotal = _aspcapPixelLimits(dr=dr)
        out= numpy.zeros(aspcapTotal)
        out[:aspcapGre_start]= hdulist[2].data[metalsIndx,loggIndx,teffIndx]
        out[aspcapGre_start:aspcapRed_start]= hdulist[3].data[metalsIndx,loggIndx,teffIndx]
        out[aspcapRed_start:]= hdulist[4].data[metalsIndx,loggIndx,teffIndx]
        return out
def apWave(chip,ext=2,dr=None):
    """
    NAME:
       apWave
    PURPOSE:
       open an apWave file
    INPUT:
       chip - chip 'a', 'b', or 'c'
       ext= (2) extension to read
       dr= return the path corresponding to this data release
    OUTPUT:
       contents of HDU ext
    HISTORY:
       2015-02-27 - Written - Bovy (IAS)
    """
    # Download the file on first use, then read the requested extension
    waveFile= path.apWavePath(chip,dr=dr)
    if not os.path.exists(waveFile):
        download.apWave(chip,dr=dr)
    return fitsread(waveFile,ext)
def apLSF(chip,ext=0,dr=None):
    """
    NAME:
       apLSF
    PURPOSE:
       open an apLSF file
    INPUT:
       chip - chip 'a', 'b', or 'c'
       ext= (0) extension to read
       dr= return the path corresponding to this data release
    OUTPUT:
       contents of HDU ext
    HISTORY:
       2015-03-12 - Written - Bovy (IAS)
    """
    # Download the file on first use, then read the requested extension
    lsfFile= path.apLSFPath(chip,dr=dr)
    if not os.path.exists(lsfFile):
        download.apLSF(chip,dr=dr)
    return fitsread(lsfFile,ext)
def mainIndx(data):
    """
    NAME:
       mainIndx
    PURPOSE:
       apply 'main' flag cuts and return the index of 'main' targets
    INPUT:
       data- data sample (with APOGEE_TARGET1 and APOGEE_TARGET2 flags)
    OUTPUT:
       index of 'main' targets in data
    HISTORY:
       2013-11-19 - Written - Bovy (IAS)
       2018-03-27 - Edited for APOGEE-2 - Bovy (UofT)
    """
    t1= data['APOGEE_TARGET1']
    t2= data['APOGEE_TARGET2']
    # main: one of bits 11/12/13 set, none of t1 bits 7/8 or t2 bit 9
    indx= (((t1 & 2**11) != 0) | ((t1 & 2**12) != 0) | ((t1 & 2**13) != 0)) \
        & ((t1 & 2**7) == 0) \
        & ((t1 & 2**8) == 0) \
        & ((t2 & 2**9) == 0)
    if 'APOGEE2_TARGET1' in data.dtype.names:
        # same bit cuts on the APOGEE-2 targeting flags
        a1= data['APOGEE2_TARGET1']
        a2= data['APOGEE2_TARGET2']
        indx |= (((a1 & 2**11) != 0) | ((a1 & 2**12) != 0) | ((a1 & 2**13) != 0)) \
            & ((a1 & 2**7) == 0) \
            & ((a1 & 2**8) == 0) \
            & ((a2 & 2**9) == 0)
    if 'SURVEY' in data.dtype.names: # APOGEE-2 file --> split by AP1 / AP2
        #ensure the whitespace is gone... (whitespace was left in some old fits reading...)
        survey= numpy.array([s.strip() for s in data['SURVEY']])
        if not isinstance(survey[0],(bytes,numpy.bytes_)):
            survey= numpy.array([s.encode('utf-8') for s in survey])
        main_surveys= [b'apogee',
                       b'apogee,apogee-marvels',
                       b'apogee,apogee-marvels,apogee2',
                       b'apogee,apogee-marvels,apogee2-manga',
                       b'apogee,apogee2',
                       b'apogee,apogee2,apogee2-manga',
                       b'apogee,apogee2-manga',
                       b'apogee-marvels',
                       b'apogee-marvels,apogee2',
                       b'apogee-marvels,apogee2-manga',
                       b'apogee2',
                       b'apogee2-manga',
                       b'manga-apogee2',
                       b'apogee2,apogee2-manga',
                       b'apogee2s']
        indx &= numpy.in1d(survey,main_surveys)
    return indx
def remove_duplicates(data):
    """
    NAME:
       remove_duplicates
    PURPOSE:
       remove duplicates from an array
    INPUT:
       data - array
    OUTPUT:
       array w/ duplicates removed
    HISTORY:
       2014-06-23 - Written - Bovy (IAS)
    """
    if not _ESUTIL_LOADED:
        raise ImportError("apogee.tools.read.remove_duplicates function requires the esutil module for catalog matching")
    tdata= copy.copy(data)
    #Match the data against itself
    # Newer esutil versions provide the Matcher interface; older ones need
    # the HTM + match_prepare route (assumes _ESUTIL_VERSION is a
    # [major, minor, patch] sequence -- TODO confirm against esutil)
    if _ESUTIL_VERSION[1] >= 5 \
            and (_ESUTIL_VERSION[1] >= 6 or _ESUTIL_VERSION[2] >= 3):
        h= esutil.htm.Matcher(10,data['RA'],data['DEC'])
        m1,m2,d12 = h.match(data['RA'],data['DEC'],
                            2./3600.,maxmatch=0) #all matches
    else:
        h=esutil.htm.HTM()
        htmrev2,minid,maxid = h.match_prepare(data['RA'],data['DEC'])
        m1,m2,d12 = h.match(data['RA'],data['DEC'],
                            data['RA'],data['DEC'],
                            2./3600.,maxmatch=0, #all matches
                            htmrev2=htmrev2,minid=minid,maxid=maxid)
    # Indices that appear more than once in the sorted match list matched
    # several rows within 2 arcsec, i.e., they have duplicates
    sindx= numpy.argsort(m1)
    sm1= m1[sindx]
    dup= sm1[1:] == sm1[:-1]
    for d in tqdm.tqdm(sm1[:-1][dup]):
        #Find the matches for just this duplicate
        if _ESUTIL_VERSION[1] >= 5 \
                and (_ESUTIL_VERSION[1] >= 6 or _ESUTIL_VERSION[2] >= 3):
            nm1,nm2,nd12= h.match(data['RA'][d],data['DEC'][d],
                                  2./3600.,maxmatch=0) #all matches
        else:
            nm1,nm2,nd12= h.match(data['RA'][d],data['DEC'][d],
                                  data['RA'],data['DEC'],
                                  2./3600.,maxmatch=0, #all matches
                                  htmrev2=htmrev2,minid=minid,maxid=maxid)
        #If some matches are commissioning data or have bad ak, rm from consideration
        # 'apogee.n.c'/'apogee.s.c' inside APSTAR_ID marks commissioning data;
        # the except branch handles str (rather than bytes) APSTAR_ID entries
        try:
            comindx= numpy.array(['apogee.n.c'.encode('utf-8') in s for s in data['APSTAR_ID'][nm2]])
            comindx+= numpy.array(['apogee.s.c'.encode('utf-8') in s for s in data['APSTAR_ID'][nm2]])
        except TypeError:
            comindx= numpy.array(['apogee.n.c' in s for s in data['APSTAR_ID'][nm2]])
            comindx+= numpy.array(['apogee.s.c' in s for s in data['APSTAR_ID'][nm2]])
        # good extinction: finite AK_TARG that is not a -9999-style sentinel
        goodak= (True^numpy.isnan(data['AK_TARG'][nm2]))\
            *(data['AK_TARG'][nm2] > -50.)
        hisnr= numpy.argmax(data['SNR'][nm2]*(True^comindx)*goodak) #effect. make com zero SNR
        if numpy.amax(data['SNR'][nm2]*(True^comindx)*goodak) == 0.: #all commissioning or bad ak, treat all equally
            hisnr= numpy.argmax(data['SNR'][nm2])
        # Keep only the highest-SNR entry of this duplicate group; the others
        # get their RA poisoned and are dropped in the final cut below
        tindx= numpy.ones(len(nm2),dtype='bool')
        tindx[hisnr]= False
        tdata['RA'][nm2[tindx]]= -9999
    return tdata[tdata['RA'] != -9999]
def _xmatch(cat1,cat2,maxdist=2,
            colRA1='RA',colDec1='DEC',colRA2='RA',colDec2='DEC'):
    """Internal version, basically copied and simplified from
    gaia_tools.xmatch, but put here to avoid adding gaia_tools as
    a dependency

    Cross-match cat1 against cat2 on sky position; returns
    (indices into cat1, indices into cat2, angular separations)
    for pairs closer than maxdist arcsec."""
    try:
        import astropy.coordinates as acoords
        from astropy import units as u
    # BUG FIX: was a bare except, which also swallowed KeyboardInterrupt,
    # SystemExit, and unrelated errors raised while importing astropy
    except ImportError:
        raise ImportError('The functionality that you are using requires astropy to be installed; please install astropy and run again')
    mc1= acoords.SkyCoord(cat1[colRA1],cat1[colDec1],
                          unit=(u.degree, u.degree),frame='icrs')
    mc2= acoords.SkyCoord(cat2[colRA2],cat2[colDec2],
                          unit=(u.degree, u.degree),frame='icrs')
    idx,d2d,d3d = mc1.match_to_catalog_sky(mc2)
    mindx= d2d < maxdist*u.arcsec
    m1= numpy.arange(len(cat1))[mindx]
    m2= idx[mindx]
    return (m1,m2,d2d[mindx])
def _swap_in_astroNN(data,astroNNdata):
    """Overwrite the ASPCAP Teff, logg, and abundances in data with the
    astroNN deep-learning values.

    data - allStar-like structured array (modified in place and returned)
    astroNNdata - astroNN VAC data, assumed row-matched to data
    """
    # the 20 elements, in the column order of the DR14 astroNN arrays
    _astroNN_tags= ['C','CI','N','O','Na','Mg','Al','Si','P','S','K',
                    'Ca','Ti','TiII','V','Cr','Mn','Fe','Co','Ni']
    dr= path._default_dr()
    if int(dr) == 14:
        # DR14 VAC stores all labels in two (nstar,22) arrays:
        # columns 0-1 = Teff/logg, columns 2-21 = the 20 elements above
        for tag,indx in zip(['TEFF','LOGG'],[0,1]):
            data[tag]= astroNNdata['astroNN'][:,indx]
            data[tag+'_ERR']= astroNNdata['astroNN_error'][:,indx]
        for tag,indx in zip(_astroNN_tags,range(2,22)):
            data['X_H'][:,elemIndx(tag.upper())]=\
                astroNNdata['astroNN'][:,indx]
            data['X_H_ERR'][:,elemIndx(tag.upper())]=\
                astroNNdata['astroNN_error'][:,indx]
            if tag.upper() != 'FE':
                # [X/Fe] = [X/H] - [Fe/H]; column 19 holds Fe
                data['{}_FE'.format(tag.upper())]=\
                    astroNNdata['astroNN'][:,indx]-astroNNdata['astroNN'][:,19]
                data['{}_FE_ERR'.format(tag.upper())]=\
                    numpy.sqrt(astroNNdata['astroNN_error'][:,indx]**2.
                               +astroNNdata['astroNN_error'][:,19]**2.)
            else:
                # FIX: original wrote 'FE_H'.format(...), a no-op format call
                data['FE_H']= astroNNdata['astroNN'][:,indx]
                data['FE_H_ERR']= astroNNdata['astroNN_error'][:,indx]
    else:
        # post-DR14 VAC stores every label as an individually named column
        for tag in ['TEFF','LOGG']:
            data[tag]= astroNNdata[tag]
            data[tag+'_ERR']= astroNNdata[tag+'_ERR']
        for tag in _astroNN_tags:
            data['X_H'][:,elemIndx(tag.upper())]=\
                astroNNdata[tag.upper()+'_H']
            data['X_H_ERR'][:,elemIndx(tag.upper())]=\
                astroNNdata[tag.upper()+'_H_ERR']
            if tag.upper() != 'FE':
                data['{}_FE'.format(tag.upper())]=\
                    astroNNdata[tag.upper()+'_H']-astroNNdata['FE_H']
                data['{}_FE_ERR'.format(tag.upper())]=\
                    numpy.sqrt(astroNNdata['{}_H_ERR'.format(tag.upper())]**2.
                               +astroNNdata['FE_H_ERR']**2.)
            else:
                data['FE_H']= astroNNdata['FE_H']
                data['FE_H_ERR']= astroNNdata['FE_H_ERR']
    return data
def _add_astroNN_distances(data,astroNNDistancesdata):
dr= path._default_dr()
fields_to_append= ['dist','dist_model_error','dist_error',
'weighted_dist','weighted_dist_error']
if True:
# Faster way to join structured arrays (see https://stackoverflow.com/questions/5355744/numpy-joining-structured-arrays)
newdtype= data.dtype.descr+\
[(f,'<f8') for f in fields_to_append]
newdata= numpy.empty(len(data),dtype=newdtype)
for name in data.dtype.names:
newdata[name]= data[name]
for f in fields_to_append:
newdata[f]= astroNNDistancesdata[f]
return newdata
else:
return numpy.lib.recfunctions.append_fields(\
data,
fields_to_append,
[astroNNDistancesdata[f] for f in fields_to_append],
[astroNNDistancesdata[f].dtype for f in fields_to_append],
usemask=False)
def _add_astroNN_ages(data,astroNNAgesdata):
    """Append the astroNN age columns to data.

    data - allStar-like structured array
    astroNNAgesdata - astroNN ages VAC; row-matched to data except for DR14,
                      where it is matched on APOGEE_ID below

    Returns a NEW structured array with the age fields appended as '<f8'
    (initialized to -9999. for rows without an age).
    """
    dr= path._default_dr()
    if int(dr) == 14:
        fields_to_append= ['astroNN_age','astroNN_age_total_std',
                           'astroNN_age_predictive_std',
                           'astroNN_age_model_std']
    else:
        fields_to_append= ['age','age_linear_correct','age_lowess_correct',
                           'age_total_error','age_model_error']
    # Join the structured arrays manually; numpy.lib.recfunctions.append_fields
    # is slow for this (see numpy/numpy#7811), so a dead fallback branch that
    # used it was removed
    newdtype= data.dtype.descr+[(f,'<f8') for f in fields_to_append]
    newdata= numpy.empty(len(data),dtype=newdtype)
    for name in data.dtype.names:
        newdata[name]= data[name]
    for f in fields_to_append:
        newdata[f]= numpy.zeros(len(data))-9999.
    data= newdata
    if int(dr) == 14: # Not row-matched to allStar, so need to match
        # Only match primary targets (EXTRATARG bit 4 not set)
        hash1= dict(zip(data['APOGEE_ID'][(data['EXTRATARG'] & 2**4) == 0],
                        numpy.arange(len(data))[(data['EXTRATARG'] & 2**4) == 0]))
        hash2= dict(zip(astroNNAgesdata['APOGEE_ID'],
                        numpy.arange(len(astroNNAgesdata))))
        common= numpy.intersect1d(\
            data['APOGEE_ID'][(data['EXTRATARG'] & 2**4) == 0],
            astroNNAgesdata['APOGEE_ID'])
        indx1= list(itemgetter(*common)(hash1))
        indx2= list(itemgetter(*common)(hash2))
        for f in fields_to_append:
            data[f][indx1]= astroNNAgesdata[f][indx2]
    else:
        for f in fields_to_append:
            data[f]= astroNNAgesdata[f]
    return data
def _add_astroNN_orbits(data,astroNNOrbitsdata):
    """Append Galactocentric coordinates and orbit parameters from the
    astroNN VAC to data (DR16 only).

    data - allStar-like structured array
    astroNNOrbitsdata - astroNN orbits VAC, assumed row-matched to data

    Returns data unchanged (with a warning) for DR < 16, otherwise a NEW
    structured array with the orbit fields appended as '<f8'.
    """
    dr= path._default_dr()
    if int(dr) < 16:
        warnings.warn("Tried to include orbits: No orbits or Galactocentric coordinates in DR < 16 catalogues!")
        return data
    if int(dr) > 16:
        # BUG FIX: only the DR16 VAC schema is implemented; previously any
        # newer dr fell through to a NameError on fields_to_append
        raise ValueError("astroNN orbits are only implemented for DR16")
    #also have galactocentric and orbit info
    fields_to_append= [ 'GALR','GALPHI', 'GALZ','GALR_ERR','GALPHI_ERR','GALZ_ERR',
                        'GALVR','GALVT','GALVZ','GALVR_ERR','GALVT_ERR','GALVZ_ERR',
                        'GALVR_GALVT_CORR','GALVR_GALVZ_CORR','GALVT_GALVZ_CORR',
                        'e','e_err','zmax','zmax_err','rperi','rperi_err','rap','rap_err',
                        'e_zmax_corr','e_rperi_corr','e_rap_corr','zmax_rperi_corr',
                        'zmax_rap_corr','rperi_rap_corr','jr','jr_err','Lz','Lz_err',
                        'jz','jz_err','jr_Lz_corr','jr_jz_corr','lz_jz_corr',
                        'omega_r','omega_r_err','omega_phi','omega_phi_err',
                        'omega_z','omega_z_err','theta_r','theta_r_err',
                        'theta_phi','theta_phi_err','theta_z','theta_z_err',
                        'rl','rl_err','Energy','Energy_Err','EminusEc','EminusEc_err']
    # Join the structured arrays manually; numpy.lib.recfunctions.append_fields
    # is slow for this (a dead fallback branch that used it was removed)
    newdtype= data.dtype.descr+[(f,'<f8') for f in fields_to_append]
    newdata= numpy.empty(len(data),dtype=newdtype)
    for name in data.dtype.names:
        newdata[name]= data[name]
    for f in fields_to_append:
        newdata[f]= astroNNOrbitsdata[f]
    return newdata
def _warn_astroNN_abundances():
warnings.warn("Swapping in stellar parameters and abundances from Leung & Bovy (2019a)")
def _warn_astroNN_distances():
warnings.warn("Adding distances from Leung & Bovy (2019b)")
def _warn_astroNN_ages():
warnings.warn("Adding ages from Mackereth, Bovy, Leung, et al. (2019)")
def _warn_astroNN_orbits():
warnings.warn("Adding orbits and Galactocentric coordinates from DR16 astroNN VAC, calculated using galpy (Bovy 2015) and the staeckel approximation (Mackereth & Bovy 2018)")
| [
"sys.stdout.write",
"apogee.tools.path.rcsamplePath",
"apogee.tools.path.allVisitPath",
"apogee.tools.download.astroNNAges",
"apogee.tools.path._redux_dr",
"numpy.argmax",
"apogee.tools.download.apogeeField",
"apogee.tools.path.distPath",
"numpy.isnan",
"numpy.argsort",
"numpy.lib.recfunctions.a... | [((1809, 1820), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1814, 1820), False, 'from functools import wraps\n'), ((3027, 3038), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (3032, 3038), False, 'from functools import wraps\n'), ((19071, 19097), 'apogee.tools.path.allVisitPath', 'path.allVisitPath', ([], {'mjd': 'mjd'}), '(mjd=mjd)\n', (19088, 19097), False, 'from apogee.tools import path, paramIndx, download\n'), ((23165, 23181), 'esutil.htm.HTM', 'esutil.htm.HTM', ([], {}), '()\n', (23179, 23181), False, 'import esutil\n'), ((23391, 23673), 'esutil.numpy_util.add_fields', 'esutil.numpy_util.add_fields', (['kascdata', "[('J0', float), ('H0', float), ('K0', float), ('APOGEE_TARGET1', '>i4'), (\n 'APOGEE_TARGET2', '>i4'), ('APOGEE_ID', 'S18'), ('LOGG', float), (\n 'TEFF', float), ('METALS', float), ('ALPHAFE', float), ('FNFE', float),\n ('FCFE', float)]"], {}), "(kascdata, [('J0', float), ('H0', float), ('K0',\n float), ('APOGEE_TARGET1', '>i4'), ('APOGEE_TARGET2', '>i4'), (\n 'APOGEE_ID', 'S18'), ('LOGG', float), ('TEFF', float), ('METALS', float\n ), ('ALPHAFE', float), ('FNFE', float), ('FCFE', float)])\n", (23419, 23673), False, 'import esutil\n'), ((26559, 26583), 'apogee.tools.path.rcsamplePath', 'path.rcsamplePath', ([], {'dr': 'dr'}), '(dr=dr)\n', (26576, 26583), False, 'from apogee.tools import path, paramIndx, download\n'), ((30517, 30540), 'apogee.tools.path.astroNNPath', 'path.astroNNPath', ([], {'dr': 'dr'}), '(dr=dr)\n', (30533, 30540), False, 'from apogee.tools import path, paramIndx, download\n'), ((32130, 32179), 'apogee.tools.path.obslogPath', 'path.obslogPath', ([], {'year': 'year', 'hemisphere': 'hemisphere'}), '(year=year, hemisphere=hemisphere)\n', (32145, 32179), False, 'from apogee.tools import path, paramIndx, download\n'), ((35096, 35148), 'numpy.genfromtxt', 'numpy.genfromtxt', (['obslogfilename'], {}), '(obslogfilename, **genfromtxtKwargs)\n', (35112, 35148), False, 'import numpy\n'), 
((35478, 35505), 'apogee.tools.path.apogeePlatePath', 'path.apogeePlatePath', ([], {'dr': 'dr'}), '(dr=dr)\n', (35498, 35505), False, 'from apogee.tools import path, paramIndx, download\n'), ((37225, 37253), 'apogee.tools.path.apogeeDesignPath', 'path.apogeeDesignPath', ([], {'dr': 'dr'}), '(dr=dr)\n', (37246, 37253), False, 'from apogee.tools import path, paramIndx, download\n'), ((38930, 38957), 'apogee.tools.path.apogeeFieldPath', 'path.apogeeFieldPath', ([], {'dr': 'dr'}), '(dr=dr)\n', (38950, 38957), False, 'from apogee.tools import path, paramIndx, download\n'), ((39650, 39690), 'apogee.tools.path.apogeeObjectPath', 'path.apogeeObjectPath', (['field_name'], {'dr': 'dr'}), '(field_name, dr=dr)\n', (39671, 39690), False, 'from apogee.tools import path, paramIndx, download\n'), ((41648, 41714), 'apogee.tools.path.aspcapStarPath', 'path.aspcapStarPath', (['loc_id', 'apogee_id'], {'dr': 'dr', 'telescope': 'telescope'}), '(loc_id, apogee_id, dr=dr, telescope=telescope)\n', (41667, 41714), False, 'from apogee.tools import path, paramIndx, download\n'), ((43019, 43081), 'apogee.tools.path.apStarPath', 'path.apStarPath', (['loc_id', 'apogee_id'], {'dr': 'dr', 'telescope': 'telescope'}), '(loc_id, apogee_id, dr=dr, telescope=telescope)\n', (43034, 43081), False, 'from apogee.tools import path, paramIndx, download\n'), ((45032, 45099), 'apogee.tools.path.apVisitPath', 'path.apVisitPath', (['plateid', 'mjd', 'fiberid'], {'telescope': 'telescope', 'dr': 'dr'}), '(plateid, mjd, fiberid, telescope=telescope, dr=dr)\n', (45048, 45099), False, 'from apogee.tools import path, paramIndx, download\n'), ((46987, 47104), 'apogee.tools.path.modelSpecPath', 'path.modelSpecPath', ([], {'lib': 'lib', 'teff': 'teff', 'logg': 'logg', 'metals': 'metals', 'cfe': 'cfe', 'nfe': 'nfe', 'afe': 'afe', 'vmicro': 'vmicro', 'dr': 'dr'}), '(lib=lib, teff=teff, logg=logg, metals=metals, cfe=cfe,\n nfe=nfe, afe=afe, vmicro=vmicro, dr=dr)\n', (47005, 47104), False, 'from apogee.tools import path, 
paramIndx, download\n'), ((47529, 47591), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'AstropyUserWarning'}), "('ignore', category=AstropyUserWarning)\n", (47552, 47591), False, 'import warnings\n'), ((47604, 47626), 'astropy.io.fits.open', 'apyfits.open', (['filePath'], {}), '(filePath)\n', (47616, 47626), True, 'import astropy.io.fits as apyfits\n'), ((49283, 49311), 'apogee.tools.path.apWavePath', 'path.apWavePath', (['chip'], {'dr': 'dr'}), '(chip, dr=dr)\n', (49298, 49311), False, 'from apogee.tools import path, paramIndx, download\n'), ((49793, 49820), 'apogee.tools.path.apLSFPath', 'path.apLSFPath', (['chip'], {'dr': 'dr'}), '(chip, dr=dr)\n', (49807, 49820), False, 'from apogee.tools import path, paramIndx, download\n'), ((52581, 52596), 'copy.copy', 'copy.copy', (['data'], {}), '(data)\n', (52590, 52596), False, 'import copy\n'), ((53259, 53276), 'numpy.argsort', 'numpy.argsort', (['m1'], {}), '(m1)\n', (53272, 53276), False, 'import numpy\n'), ((53338, 53362), 'tqdm.tqdm', 'tqdm.tqdm', (['sm1[:-1][dup]'], {}), '(sm1[:-1][dup])\n', (53347, 53362), False, 'import tqdm\n'), ((55457, 55547), 'astropy.coordinates.SkyCoord', 'acoords.SkyCoord', (['cat1[colRA1]', 'cat1[colDec1]'], {'unit': '(u.degree, u.degree)', 'frame': '"""icrs"""'}), "(cat1[colRA1], cat1[colDec1], unit=(u.degree, u.degree),\n frame='icrs')\n", (55473, 55547), True, 'import astropy.coordinates as acoords\n'), ((55577, 55667), 'astropy.coordinates.SkyCoord', 'acoords.SkyCoord', (['cat2[colRA2]', 'cat2[colDec2]'], {'unit': '(u.degree, u.degree)', 'frame': '"""icrs"""'}), "(cat2[colRA2], cat2[colDec2], unit=(u.degree, u.degree),\n frame='icrs')\n", (55593, 55667), True, 'import astropy.coordinates as acoords\n'), ((55918, 55936), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (55934, 55936), False, 'from apogee.tools import path, paramIndx, download\n'), ((58236, 58254), 'apogee.tools.path._default_dr', 'path._default_dr', ([], 
{}), '()\n', (58252, 58254), False, 'from apogee.tools import path, paramIndx, download\n'), ((59175, 59193), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (59191, 59193), False, 'from apogee.tools import path, paramIndx, download\n'), ((61167, 61185), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (61183, 61185), False, 'from apogee.tools import path, paramIndx, download\n'), ((63129, 63222), 'warnings.warn', 'warnings.warn', (['"""Swapping in stellar parameters and abundances from Leung & Bovy (2019a)"""'], {}), "(\n 'Swapping in stellar parameters and abundances from Leung & Bovy (2019a)')\n", (63142, 63222), False, 'import warnings\n'), ((63254, 63313), 'warnings.warn', 'warnings.warn', (['"""Adding distances from Leung & Bovy (2019b)"""'], {}), "('Adding distances from Leung & Bovy (2019b)')\n", (63267, 63313), False, 'import warnings\n'), ((63345, 63416), 'warnings.warn', 'warnings.warn', (['"""Adding ages from Mackereth, Bovy, Leung, et al. (2019)"""'], {}), "('Adding ages from Mackereth, Bovy, Leung, et al. 
(2019)')\n", (63358, 63416), False, 'import warnings\n'), ((63450, 63634), 'warnings.warn', 'warnings.warn', (['"""Adding orbits and Galactocentric coordinates from DR16 astroNN VAC, calculated using galpy (Bovy 2015) and the staeckel approximation (Mackereth & Bovy 2018)"""'], {}), "(\n 'Adding orbits and Galactocentric coordinates from DR16 astroNN VAC, calculated using galpy (Bovy 2015) and the staeckel approximation (Mackereth & Bovy 2018)'\n )\n", (63463, 63634), False, 'import warnings\n'), ((7722, 7747), 'apogee.tools.path.allStarPath', 'path.allStarPath', ([], {'mjd': 'mjd'}), '(mjd=mjd)\n', (7738, 7747), False, 'from apogee.tools import path, paramIndx, download\n'), ((7936, 7968), 'apogee.tools.path.allStarPath', 'path.allStarPath', ([], {'mjd': 'mjd', 'dr': 'dr'}), '(mjd=mjd, dr=dr)\n', (7952, 7968), False, 'from apogee.tools import path, paramIndx, download\n'), ((10507, 10552), 'gaia_tools.load._xmatch_cds', '_xmatch_cds', (['data', 'xmatch', 'filePath'], {}), '(data, xmatch, filePath, **kwargs)\n', (10518, 10552), False, 'from gaia_tools.load import _xmatch_cds\n'), ((13493, 13578), 'esutil.numpy_util.add_fields', 'esutil.numpy_util.add_fields', (['data', "[('J0', float), ('H0', float), ('K0', float)]"], {}), "(data, [('J0', float), ('H0', float), ('K0',\n float)])\n", (13521, 13578), False, 'import esutil\n'), ((13960, 14070), 'warnings.warn', 'warnings.warn', (['"""Extinction-corrected J,H,K not added because esutil is not installed"""', 'RuntimeWarning'], {}), "(\n 'Extinction-corrected J,H,K not added because esutil is not installed',\n RuntimeWarning)\n", (13973, 14070), False, 'import warnings\n'), ((14167, 14183), 'esutil.htm.HTM', 'esutil.htm.HTM', ([], {}), '()\n', (14181, 14183), False, 'import esutil\n'), ((14446, 14462), 'apogee.tools.path._redux_dr', 'path._redux_dr', ([], {}), '()\n', (14460, 14462), False, 'from apogee.tools import path, paramIndx, download\n'), ((17610, 17685), 'esutil.numpy_util.add_fields', 
'esutil.numpy_util.add_fields', (['data', "[('METALS', float), ('ALPHAFE', float)]"], {}), "(data, [('METALS', float), ('ALPHAFE', float)])\n", (17638, 17685), False, 'import esutil\n'), ((19109, 19133), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (19123, 19133), False, 'import os\n'), ((19143, 19169), 'apogee.tools.download.allVisit', 'download.allVisit', ([], {'mjd': 'mjd'}), '(mjd=mjd)\n', (19160, 19169), False, 'from apogee.tools import path, paramIndx, download\n'), ((19213, 19239), 'apogee.tools.path.allVisitPath', 'path.allVisitPath', ([], {'mjd': 'mjd'}), '(mjd=mjd)\n', (19230, 19239), False, 'from apogee.tools import path, paramIndx, download\n'), ((21807, 21892), 'esutil.numpy_util.add_fields', 'esutil.numpy_util.add_fields', (['data', "[('J0', float), ('H0', float), ('K0', float)]"], {}), "(data, [('J0', float), ('H0', float), ('K0',\n float)])\n", (21835, 21892), False, 'import esutil\n'), ((22274, 22384), 'warnings.warn', 'warnings.warn', (['"""Extinction-corrected J,H,K not added because esutil is not installed"""', 'RuntimeWarning'], {}), "(\n 'Extinction-corrected J,H,K not added because esutil is not installed',\n RuntimeWarning)\n", (22287, 22384), False, 'import warnings\n'), ((23118, 23136), 'apogee.tools.path.apokascPath', 'path.apokascPath', ([], {}), '()\n', (23134, 23136), False, 'from apogee.tools import path, paramIndx, download\n'), ((26526, 26544), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (26542, 26544), False, 'from apogee.tools import path, paramIndx, download\n'), ((26595, 26619), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (26609, 26619), False, 'import os\n'), ((26629, 26653), 'apogee.tools.download.rcsample', 'download.rcsample', ([], {'dr': 'dr'}), '(dr=dr)\n', (26646, 26653), False, 'from apogee.tools import path, paramIndx, download\n'), ((26697, 26721), 'apogee.tools.path.rcsamplePath', 'path.rcsamplePath', ([], {'dr': 'dr'}), '(dr=dr)\n', 
(26714, 26721), False, 'from apogee.tools import path, paramIndx, download\n'), ((29853, 29903), 'gaia_tools.load._xmatch_cds', '_xmatch_cds', (['data', 'xmatch', 'matchFilePath'], {}), '(data, xmatch, matchFilePath, **kwargs)\n', (29864, 29903), False, 'from gaia_tools.load import _xmatch_cds\n'), ((30552, 30576), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (30566, 30576), False, 'import os\n'), ((30586, 30609), 'apogee.tools.download.astroNN', 'download.astroNN', ([], {'dr': 'dr'}), '(dr=dr)\n', (30602, 30609), False, 'from apogee.tools import path, paramIndx, download\n'), ((30653, 30676), 'apogee.tools.path.astroNNPath', 'path.astroNNPath', ([], {'dr': 'dr'}), '(dr=dr)\n', (30669, 30676), False, 'from apogee.tools import path, paramIndx, download\n'), ((31155, 31187), 'apogee.tools.download.astroNNDistances', 'download.astroNNDistances', ([], {'dr': 'dr'}), '(dr=dr)\n', (31180, 31187), False, 'from apogee.tools import path, paramIndx, download\n'), ((31231, 31263), 'apogee.tools.path.astroNNDistancesPath', 'path.astroNNDistancesPath', ([], {'dr': 'dr'}), '(dr=dr)\n', (31256, 31263), False, 'from apogee.tools import path, paramIndx, download\n'), ((31717, 31744), 'apogee.tools.download.astroNNAges', 'download.astroNNAges', ([], {'dr': 'dr'}), '(dr=dr)\n', (31737, 31744), False, 'from apogee.tools import path, paramIndx, download\n'), ((31788, 31815), 'apogee.tools.path.astroNNAgesPath', 'path.astroNNAgesPath', ([], {'dr': 'dr'}), '(dr=dr)\n', (31808, 31815), False, 'from apogee.tools import path, paramIndx, download\n'), ((32191, 32221), 'os.path.exists', 'os.path.exists', (['obslogfilename'], {}), '(obslogfilename)\n', (32205, 32221), False, 'import os\n'), ((32231, 32280), 'apogee.tools.download.obslog', 'download.obslog', ([], {'year': 'year', 'hemisphere': 'hemisphere'}), '(year=year, hemisphere=hemisphere)\n', (32246, 32280), False, 'from apogee.tools import path, paramIndx, download\n'), ((35517, 35541), 'os.path.exists', 
'os.path.exists', (['filePath'], {}), '(filePath)\n', (35531, 35541), False, 'import os\n'), ((35551, 35578), 'apogee.tools.download.apogeePlate', 'download.apogeePlate', ([], {'dr': 'dr'}), '(dr=dr)\n', (35571, 35578), False, 'from apogee.tools import path, paramIndx, download\n'), ((35740, 35758), 'numpy.asarray', 'numpy.asarray', (['out'], {}), '(out)\n', (35753, 35758), False, 'import numpy\n'), ((36397, 36426), 'numpy.unique', 'numpy.unique', (["out['PLATE_ID']"], {}), "(out['PLATE_ID'])\n", (36409, 36426), False, 'import numpy\n'), ((37265, 37289), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (37279, 37289), False, 'import os\n'), ((37299, 37327), 'apogee.tools.download.apogeeDesign', 'download.apogeeDesign', ([], {'dr': 'dr'}), '(dr=dr)\n', (37320, 37327), False, 'from apogee.tools import path, paramIndx, download\n'), ((37431, 37449), 'numpy.asarray', 'numpy.asarray', (['out'], {}), '(out)\n', (37444, 37449), False, 'import numpy\n'), ((37804, 38040), 'esutil.numpy_util.add_fields', 'esutil.numpy_util.add_fields', (['out', "[('SHORT_COHORT_MIN_H', float), ('SHORT_COHORT_MAX_H', float), (\n 'MEDIUM_COHORT_MIN_H', float), ('MEDIUM_COHORT_MAX_H', float), (\n 'LONG_COHORT_MIN_H', float), ('LONG_COHORT_MAX_H', float)]"], {}), "(out, [('SHORT_COHORT_MIN_H', float), (\n 'SHORT_COHORT_MAX_H', float), ('MEDIUM_COHORT_MIN_H', float), (\n 'MEDIUM_COHORT_MAX_H', float), ('LONG_COHORT_MIN_H', float), (\n 'LONG_COHORT_MAX_H', float)])\n", (37832, 38040), False, 'import esutil\n'), ((38969, 38993), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (38983, 38993), False, 'import os\n'), ((39003, 39030), 'apogee.tools.download.apogeeField', 'download.apogeeField', ([], {'dr': 'dr'}), '(dr=dr)\n', (39023, 39030), False, 'from apogee.tools import path, paramIndx, download\n'), ((39701, 39725), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (39715, 39725), False, 'import os\n'), ((39735, 39775), 
'apogee.tools.download.apogeeObject', 'download.apogeeObject', (['field_name'], {'dr': 'dr'}), '(field_name, dr=dr)\n', (39756, 39775), False, 'from apogee.tools import path, paramIndx, download\n'), ((40143, 40228), 'esutil.numpy_util.add_fields', 'esutil.numpy_util.add_fields', (['data', "[('J0', float), ('H0', float), ('K0', float)]"], {}), "(data, [('J0', float), ('H0', float), ('K0',\n float)])\n", (40171, 40228), False, 'import esutil\n'), ((40610, 40720), 'warnings.warn', 'warnings.warn', (['"""Extinction-corrected J,H,K not added because esutil is not installed"""', 'RuntimeWarning'], {}), "(\n 'Extinction-corrected J,H,K not added because esutil is not installed',\n RuntimeWarning)\n", (40623, 40720), False, 'import warnings\n'), ((41723, 41747), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (41737, 41747), False, 'import os\n'), ((41757, 41823), 'apogee.tools.download.aspcapStar', 'download.aspcapStar', (['loc_id', 'apogee_id'], {'dr': 'dr', 'telescope': 'telescope'}), '(loc_id, apogee_id, dr=dr, telescope=telescope)\n', (41776, 41823), False, 'from apogee.tools import path, paramIndx, download\n'), ((43090, 43114), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (43104, 43114), False, 'import os\n'), ((43124, 43186), 'apogee.tools.download.apStar', 'download.apStar', (['loc_id', 'apogee_id'], {'dr': 'dr', 'telescope': 'telescope'}), '(loc_id, apogee_id, dr=dr, telescope=telescope)\n', (43139, 43186), False, 'from apogee.tools import path, paramIndx, download\n'), ((45142, 45166), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (45156, 45166), False, 'import os\n'), ((45176, 45243), 'apogee.tools.download.apVisit', 'download.apVisit', (['plateid', 'mjd', 'fiberid'], {'telescope': 'telescope', 'dr': 'dr'}), '(plateid, mjd, fiberid, telescope=telescope, dr=dr)\n', (45192, 45243), False, 'from apogee.tools import path, paramIndx, download\n'), ((45463, 45481), 'numpy.flipud', 
'numpy.flipud', (['data'], {}), '(data)\n', (45475, 45481), False, 'import numpy\n'), ((47138, 47162), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (47152, 47162), False, 'import os\n'), ((47172, 47299), 'apogee.tools.download.modelSpec', 'download.modelSpec', ([], {'lib': 'lib', 'teff': 'teff', 'logg': 'logg', 'metals': 'metals', 'cfe': 'cfe', 'nfe': 'nfe', 'afe': 'afe', 'vmicro': 'vmicro', 'dr': 'dr'}), '(lib=lib, teff=teff, logg=logg, metals=metals, cfe=cfe,\n nfe=nfe, afe=afe, vmicro=vmicro, dr=dr, **kwargs)\n', (47190, 47299), False, 'from apogee.tools import path, paramIndx, download\n'), ((47715, 47733), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (47731, 47733), False, 'from apogee.tools import path, paramIndx, download\n'), ((47771, 47799), 'numpy.linspace', 'numpy.linspace', (['(0.0)', '(5.0)', '(11)'], {}), '(0.0, 5.0, 11)\n', (47785, 47799), False, 'import numpy\n'), ((47816, 47844), 'numpy.linspace', 'numpy.linspace', (['(-2.5)', '(0.5)', '(7)'], {}), '(-2.5, 0.5, 7)\n', (47830, 47844), False, 'import numpy\n'), ((49322, 49346), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (49336, 49346), False, 'import os\n'), ((49356, 49384), 'apogee.tools.download.apWave', 'download.apWave', (['chip'], {'dr': 'dr'}), '(chip, dr=dr)\n', (49371, 49384), False, 'from apogee.tools import path, paramIndx, download\n'), ((49831, 49855), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (49845, 49855), False, 'import os\n'), ((49865, 49892), 'apogee.tools.download.apLSF', 'download.apLSF', (['chip'], {'dr': 'dr'}), '(chip, dr=dr)\n', (49879, 49892), False, 'from apogee.tools import path, paramIndx, download\n'), ((52746, 52793), 'esutil.htm.Matcher', 'esutil.htm.Matcher', (['(10)', "data['RA']", "data['DEC']"], {}), "(10, data['RA'], data['DEC'])\n", (52764, 52793), False, 'import esutil\n'), ((52926, 52942), 'esutil.htm.HTM', 'esutil.htm.HTM', ([], {}), '()\n', (52940, 
52942), False, 'import esutil\n'), ((54555, 54613), 'numpy.argmax', 'numpy.argmax', (["(data['SNR'][nm2] * (True ^ comindx) * goodak)"], {}), "(data['SNR'][nm2] * (True ^ comindx) * goodak)\n", (54567, 54613), False, 'import numpy\n'), ((58871, 59068), 'numpy.lib.recfunctions.append_fields', 'numpy.lib.recfunctions.append_fields', (['data', 'fields_to_append', '[astroNNDistancesdata[f] for f in fields_to_append]', '[astroNNDistancesdata[f].dtype for f in fields_to_append]'], {'usemask': '(False)'}), '(data, fields_to_append, [\n astroNNDistancesdata[f] for f in fields_to_append], [\n astroNNDistancesdata[f].dtype for f in fields_to_append], usemask=False)\n', (58907, 59068), False, 'import numpy\n'), ((60681, 60784), 'numpy.intersect1d', 'numpy.intersect1d', (["data['APOGEE_ID'][data['EXTRATARG'] & 2 ** 4 == 0]", "astroNNAgesdata['APOGEE_ID']"], {}), "(data['APOGEE_ID'][data['EXTRATARG'] & 2 ** 4 == 0],\n astroNNAgesdata['APOGEE_ID'])\n", (60698, 60784), False, 'import numpy\n'), ((61215, 61329), 'warnings.warn', 'warnings.warn', (['"""Tried to include orbits: No orbits or Galactocentric coordinates in DR < 16 catalogues!"""'], {}), "(\n 'Tried to include orbits: No orbits or Galactocentric coordinates in DR < 16 catalogues!'\n )\n", (61228, 61329), False, 'import warnings\n'), ((62848, 63039), 'numpy.lib.recfunctions.append_fields', 'numpy.lib.recfunctions.append_fields', (['data', 'fields_to_append', '[astroNNOrbitsdata[f] for f in fields_to_append]', '[astroNNOrbitsdata[f].dtype for f in fields_to_append]'], {'usemask': '(False)'}), '(data, fields_to_append, [\n astroNNOrbitsdata[f] for f in fields_to_append], [astroNNOrbitsdata[f].\n dtype for f in fields_to_append], usemask=False)\n', (62884, 63039), False, 'import numpy\n'), ((1080, 1109), 'esutil.__version__.split', 'esutil.__version__.split', (['"""."""'], {}), "('.')\n", (1104, 1109), False, 'import esutil\n'), ((7763, 7787), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (7777, 
7787), False, 'import os\n'), ((7801, 7826), 'apogee.tools.download.allStar', 'download.allStar', ([], {'mjd': 'mjd'}), '(mjd=mjd)\n', (7817, 7826), False, 'from apogee.tools import path, paramIndx, download\n'), ((7881, 7906), 'apogee.tools.path.allStarPath', 'path.allStarPath', ([], {'mjd': 'mjd'}), '(mjd=mjd)\n', (7897, 7906), False, 'from apogee.tools import path, paramIndx, download\n'), ((7984, 8008), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (7998, 8008), False, 'import os\n'), ((8022, 8054), 'apogee.tools.download.allStar', 'download.allStar', ([], {'mjd': 'mjd', 'dr': 'dr'}), '(mjd=mjd, dr=dr)\n', (8038, 8054), False, 'from apogee.tools import path, paramIndx, download\n'), ((8109, 8141), 'apogee.tools.path.allStarPath', 'path.allStarPath', ([], {'mjd': 'mjd', 'dr': 'dr'}), '(mjd=mjd, dr=dr)\n', (8125, 8141), False, 'from apogee.tools import path, paramIndx, download\n'), ((9613, 9641), 'os.path.exists', 'os.path.exists', (['dupsFilename'], {}), '(dupsFilename)\n', (9627, 9641), False, 'import os\n'), ((9731, 9887), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' +\n 'Removing duplicates (might take a while) and caching the duplicate-free file ... (file not cached if use_astroNN=True)\\r'\n )"], {}), "('\\r' +\n 'Removing duplicates (might take a while) and caching the duplicate-free file ... 
(file not cached if use_astroNN=True)\\r'\n )\n", (9747, 9887), False, 'import sys\n'), ((9889, 9907), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9905, 9907), False, 'import sys\n'), ((10134, 10175), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + _ERASESTR + '\\r')"], {}), "('\\r' + _ERASESTR + '\\r')\n", (10150, 10175), False, 'import sys\n'), ((10184, 10202), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (10200, 10202), False, 'import sys\n'), ((14138, 14153), 'apogee.tools.path.distPath', 'path.distPath', ([], {}), '()\n', (14151, 14153), False, 'from apogee.tools import path, paramIndx, download\n'), ((14561, 14797), 'esutil.numpy_util.add_fields', 'esutil.numpy_util.add_fields', (['data', "[('DM05', float), ('DM16', float), ('DM50', float), ('DM84', float), (\n 'DM95', float), ('DMPEAK', float), ('DMAVG', float), ('SIG_DM', float),\n ('DIST_SOL', float), ('SIG_DISTSOL', float)]"], {}), "(data, [('DM05', float), ('DM16', float), (\n 'DM50', float), ('DM84', float), ('DM95', float), ('DMPEAK', float), (\n 'DMAVG', float), ('SIG_DM', float), ('DIST_SOL', float), ('SIG_DISTSOL',\n float)])\n", (14589, 14797), False, 'import esutil\n'), ((17278, 17396), 'warnings.warn', 'warnings.warn', (['"""Distances not added because matching requires the uninstalled esutil module"""', 'RuntimeWarning'], {}), "(\n 'Distances not added because matching requires the uninstalled esutil module'\n , RuntimeWarning)\n", (17291, 17396), False, 'import warnings\n'), ((21401, 21416), 'numpy.dtype', 'numpy.dtype', (['dt'], {}), '(dt)\n', (21412, 21416), False, 'import numpy\n'), ((21636, 21651), 'numpy.dtype', 'numpy.dtype', (['dt'], {}), '(dt)\n', (21647, 21651), False, 'import numpy\n'), ((28749, 28860), 'numpy.lib.recfunctions.rename_fields', 'numpy.lib.recfunctions.rename_fields', (['data', "{'GALVR': 'RC_GALVR', 'GALVT': 'RC_GALVT', 'GALVZ': 'RC_GALVZ'}"], {}), "(data, {'GALVR': 'RC_GALVR', 'GALVT':\n 'RC_GALVT', 'GALVZ': 'RC_GALVZ'})\n", (28785, 
28860), False, 'import numpy\n'), ((31112, 31144), 'apogee.tools.path.astroNNDistancesPath', 'path.astroNNDistancesPath', ([], {'dr': 'dr'}), '(dr=dr)\n', (31137, 31144), False, 'from apogee.tools import path, paramIndx, download\n'), ((31679, 31706), 'apogee.tools.path.astroNNAgesPath', 'path.astroNNAgesPath', ([], {'dr': 'dr'}), '(dr=dr)\n', (31699, 31706), False, 'from apogee.tools import path, paramIndx, download\n'), ((32313, 32331), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (32329, 32331), False, 'from apogee.tools import path, paramIndx, download\n'), ((36607, 36641), 'numpy.unique', 'numpy.unique', (["entries['DESIGN_ID']"], {}), "(entries['DESIGN_ID'])\n", (36619, 36641), False, 'import numpy\n'), ((36761, 36797), 'numpy.unique', 'numpy.unique', (["entries['LOCATION_ID']"], {}), "(entries['LOCATION_ID'])\n", (36773, 36797), False, 'import numpy\n'), ((47897, 47931), 'numpy.linspace', 'numpy.linspace', (['(3500.0)', '(6000.0)', '(11)'], {}), '(3500.0, 6000.0, 11)\n', (47911, 47931), False, 'import numpy\n'), ((48169, 48196), 'numpy.fabs', 'numpy.fabs', (['(logg - logggrid)'], {}), '(logg - logggrid)\n', (48179, 48196), False, 'import numpy\n'), ((48229, 48260), 'numpy.fabs', 'numpy.fabs', (['(metals - metalsgrid)'], {}), '(metals - metalsgrid)\n', (48239, 48260), False, 'import numpy\n'), ((48630, 48654), 'numpy.zeros', 'numpy.zeros', (['aspcapTotal'], {}), '(aspcapTotal)\n', (48641, 48654), False, 'import numpy\n'), ((54646, 54702), 'numpy.amax', 'numpy.amax', (["(data['SNR'][nm2] * (True ^ comindx) * goodak)"], {}), "(data['SNR'][nm2] * (True ^ comindx) * goodak)\n", (54656, 54702), False, 'import numpy\n'), ((54771, 54801), 'numpy.argmax', 'numpy.argmax', (["data['SNR'][nm2]"], {}), "(data['SNR'][nm2])\n", (54783, 54801), False, 'import numpy\n'), ((3506, 3563), 'numpy.zeros', 'numpy.zeros', (['(aspcapTotal, out.shape[0])'], {'dtype': 'out.dtype'}), '((aspcapTotal, out.shape[0]), dtype=out.dtype)\n', (3517, 3563), False, 
'import numpy\n'), ((3709, 3750), 'numpy.zeros', 'numpy.zeros', (['aspcapTotal'], {'dtype': 'out.dtype'}), '(aspcapTotal, dtype=out.dtype)\n', (3720, 3750), False, 'import numpy\n'), ((9228, 9253), 'apogee.tools.path.allStarPath', 'path.allStarPath', ([], {'mjd': 'mjd'}), '(mjd=mjd)\n', (9244, 9253), False, 'from apogee.tools import path, paramIndx, download\n'), ((10859, 10920), 'numpy.array', 'numpy.array', (["[('apogee.n.c' in s) for s in data['APSTAR_ID']]"], {}), "([('apogee.n.c' in s) for s in data['APSTAR_ID']])\n", (10870, 10920), False, 'import numpy\n'), ((10938, 10999), 'numpy.array', 'numpy.array', (["[('apogee.s.c' in s) for s in data['APSTAR_ID']]"], {}), "([('apogee.s.c' in s) for s in data['APSTAR_ID']])\n", (10949, 10999), False, 'import numpy\n'), ((12944, 12968), 'numpy.isnan', 'numpy.isnan', (['data[aktag]'], {}), '(data[aktag])\n', (12955, 12968), False, 'import numpy\n'), ((15763, 15882), 'esutil.numpy_util.add_fields', 'esutil.numpy_util.add_fields', (['data', "[('DISO', float), ('DMASS', float), ('DISO_GAL', float), ('DMASS_GAL', float)]"], {}), "(data, [('DISO', float), ('DMASS', float), (\n 'DISO_GAL', float), ('DMASS_GAL', float)])\n", (15791, 15882), False, 'import esutil\n'), ((17413, 17439), 'apogee.tools.path._APOGEE_REDUX.lower', 'path._APOGEE_REDUX.lower', ([], {}), '()\n', (17437, 17439), False, 'from apogee.tools import path, paramIndx, download\n'), ((17497, 17523), 'apogee.tools.path._APOGEE_REDUX.lower', 'path._APOGEE_REDUX.lower', ([], {}), '()\n', (17521, 17523), False, 'from apogee.tools import path, paramIndx, download\n'), ((17774, 17793), 'apogee.tools.paramIndx', 'paramIndx', (['"""metals"""'], {}), "('metals')\n", (17783, 17793), False, 'from apogee.tools import path, paramIndx, download\n'), ((17836, 17854), 'apogee.tools.paramIndx', 'paramIndx', (['"""alpha"""'], {}), "('alpha')\n", (17845, 17854), False, 'from apogee.tools import path, paramIndx, download\n'), ((19548, 19608), 'numpy.array', 'numpy.array', 
(["[('apogee.n.c' in s) for s in data['VISIT_ID']]"], {}), "([('apogee.n.c' in s) for s in data['VISIT_ID']])\n", (19559, 19608), False, 'import numpy\n'), ((19626, 19686), 'numpy.array', 'numpy.array', (["[('apogee.s.c' in s) for s in data['VISIT_ID']]"], {}), "([('apogee.s.c' in s) for s in data['VISIT_ID']])\n", (19637, 19686), False, 'import numpy\n'), ((19935, 19959), 'numpy.isnan', 'numpy.isnan', (['data[aktag]'], {}), '(data[aktag])\n', (19946, 19959), False, 'import numpy\n'), ((21111, 21126), 'numpy.dtype', 'numpy.dtype', (['dt'], {}), '(dt)\n', (21122, 21126), False, 'import numpy\n'), ((36043, 36072), 'numpy.unique', 'numpy.unique', (["out['PLATE_ID']"], {}), "(out['PLATE_ID'])\n", (36055, 36072), False, 'import numpy\n'), ((39957, 39981), 'numpy.isnan', 'numpy.isnan', (['data[aktag]'], {}), '(data[aktag])\n', (39968, 39981), False, 'import numpy\n'), ((47963, 47990), 'numpy.fabs', 'numpy.fabs', (['(teff - teffgrid)'], {}), '(teff - teffgrid)\n', (47973, 47990), False, 'import numpy\n'), ((48045, 48079), 'numpy.linspace', 'numpy.linspace', (['(5500.0)', '(8000.0)', '(11)'], {}), '(5500.0, 8000.0, 11)\n', (48059, 48079), False, 'import numpy\n'), ((54287, 54353), 'numpy.array', 'numpy.array', (["[('apogee.n.c' in s) for s in data['APSTAR_ID'][nm2]]"], {}), "([('apogee.n.c' in s) for s in data['APSTAR_ID'][nm2]])\n", (54298, 54353), False, 'import numpy\n'), ((54374, 54440), 'numpy.array', 'numpy.array', (["[('apogee.s.c' in s) for s in data['APSTAR_ID'][nm2]]"], {}), "([('apogee.s.c' in s) for s in data['APSTAR_ID'][nm2]])\n", (54385, 54440), False, 'import numpy\n'), ((54461, 54494), 'numpy.isnan', 'numpy.isnan', (["data['AK_TARG'][nm2]"], {}), "(data['AK_TARG'][nm2])\n", (54472, 54494), False, 'import numpy\n'), ((56780, 56886), 'numpy.sqrt', 'numpy.sqrt', (["(astroNNdata['astroNN_error'][:, indx] ** 2.0 + astroNNdata['astroNN_error'\n ][:, 19] ** 2.0)"], {}), "(astroNNdata['astroNN_error'][:, indx] ** 2.0 + astroNNdata[\n 'astroNN_error'][:, 19] ** 
2.0)\n", (56790, 56886), False, 'import numpy\n'), ((60843, 60862), 'operator.itemgetter', 'itemgetter', (['*common'], {}), '(*common)\n', (60853, 60862), False, 'from operator import itemgetter\n'), ((60891, 60910), 'operator.itemgetter', 'itemgetter', (['*common'], {}), '(*common)\n', (60901, 60910), False, 'from operator import itemgetter\n'), ((2116, 2166), 'numpy.zeros', 'numpy.zeros', (['(8575, out.shape[0])'], {'dtype': 'out.dtype'}), '((8575, out.shape[0]), dtype=out.dtype)\n', (2127, 2166), False, 'import numpy\n'), ((2266, 2300), 'numpy.zeros', 'numpy.zeros', (['(8575)'], {'dtype': 'out.dtype'}), '(8575, dtype=out.dtype)\n', (2277, 2300), False, 'import numpy\n'), ((12894, 12918), 'numpy.isnan', 'numpy.isnan', (['data[aktag]'], {}), '(data[aktag])\n', (12905, 12918), False, 'import numpy\n'), ((16301, 16530), 'esutil.numpy_util.add_fields', 'esutil.numpy_util.add_fields', (['data', "[('HIP_PLX', float), ('HIP_E_PLX', float), ('RC_DIST', float), (\n 'APOKASC_DIST_DIRECT', float), ('BPG_DIST1_MEAN', float), (\n 'HAYDEN_DIST_PEAK', float), ('SCHULTHEIS_DIST', float)]"], {}), "(data, [('HIP_PLX', float), ('HIP_E_PLX', float\n ), ('RC_DIST', float), ('APOKASC_DIST_DIRECT', float), (\n 'BPG_DIST1_MEAN', float), ('HAYDEN_DIST_PEAK', float), (\n 'SCHULTHEIS_DIST', float)])\n", (16329, 16530), False, 'import esutil\n'), ((32374, 32392), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (32390, 32392), False, 'from apogee.tools import path, paramIndx, download\n'), ((32404, 32422), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (32420, 32422), False, 'from apogee.tools import path, paramIndx, download\n'), ((32465, 32483), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (32481, 32483), False, 'from apogee.tools import path, paramIndx, download\n'), ((34873, 34901), 'numpy.__version__.split', 'numpy.__version__.split', (['"""."""'], {}), "('.')\n", (34896, 34901), False, 'import numpy\n'), ((34931, 
34959), 'numpy.__version__.split', 'numpy.__version__.split', (['"""."""'], {}), "('.')\n", (34954, 34959), False, 'import numpy\n'), ((48111, 48138), 'numpy.fabs', 'numpy.fabs', (['(teff - teffgrid)'], {}), '(teff - teffgrid)\n', (48121, 48138), False, 'import numpy\n'), ((32526, 32544), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (32542, 32544), False, 'from apogee.tools import path, paramIndx, download\n'), ((32674, 32692), 'apogee.tools.path._default_dr', 'path._default_dr', ([], {}), '()\n', (32690, 32692), False, 'from apogee.tools import path, paramIndx, download\n')] |
# -*- coding: utf-8 -*-
import matplotlib
import matplotlib.gridspec
import matplotlib.pyplot as plt
import numpy as np
def microstates_plot(microstates, segmentation=None, gfp=None, info=None, epoch=None):
    """**Visualize Microstates**
    Plots the clustered microstates.
    Parameters
    ----------
    microstates : np.ndarray
        The topographic maps of the found unique microstates which has a shape of n_channels x
        n_states, generated from :func:`.microstates_segment`.
    segmentation : array
        For each sample, the index of the microstate to which the sample has been assigned.
        Defaults to ``None``.
    gfp : array
        The range of global field power (GFP) values to visualize. Defaults to ``None``, which will
        plot the whole range of GFP values.
    info : dict
        The dictionary output of :func:`.nk.microstates_segment`. Defaults to ``None``.
    epoch : tuple
        A sub-epoch of GFP to plot in the shape ``(beginning sample, end sample)``.
    Returns
    -------
    fig
        Plot of prototypical microstates maps and GFP across time.
    Raises
    ------
    ImportError
        If ``mne`` is not installed.
    ValueError
        If ``gfp`` or ``segmentation`` is not available.
    Examples
    ---------
    .. ipython:: python
      import neurokit2 as nk
      # Download data
      eeg = nk.mne_data("filt-0-40_raw")
      # Average rereference and band-pass filtering
      eeg = nk.eeg_rereference(eeg, 'average').filter(1, 30, verbose=False)
      # Cluster microstates
      microstates = nk.microstates_segment(eeg, method='kmeans', n_microstates=4)
      @savefig p_microstates_plot1.png scale=100%
      nk.microstates_plot(microstates, epoch=(500, 750))
      @suppress
      plt.close()
    """
    try:
        import mne
    except ImportError as e:
        # BUG FIX: the two message fragments were previously passed as separate
        # arguments, so the exception carried a tuple instead of one string.
        raise ImportError(
            "The 'mne' module is required for this function to run. "
            "Please install it first (`pip install mne`)."
        ) from e
    # If the full output of `microstates_segment` was passed, unpack it
    if isinstance(microstates, dict):
        if info is None and "Info" in microstates:
            info = microstates["Info"]
        if gfp is None and "GFP" in microstates:
            gfp = microstates["GFP"]
        segmentation = microstates["Sequence"]
        microstates = microstates["Microstates"]
    # Sanity checks
    if gfp is None:
        raise ValueError("GFP data must be passed to 'gfp' in order to plot the segmentation.")
    if segmentation is None:
        raise ValueError("'segmentation' must be available in order to plot the sequence.")
    # Prepare figure layout: one topomap axis per microstate on top,
    # one full-width "GFP" axis underneath
    n = len(microstates)
    fig, ax = plt.subplot_mosaic([list(range(n)), ["GFP"] * n])
    # Plot topomaps -----------------------------------------------------------
    # (loop variable renamed: `map` shadowed the builtin)
    for i, topomap in enumerate(microstates):
        mne.viz.plot_topomap(topomap, info, axes=ax[i], show=False)
        ax[i].set_title(f"{i}")
    # Plot GFP ---------------------------------------------------------------
    # x-axis in seconds when the sampling frequency is known, else in samples
    if info is not None and "sfreq" in info:
        times = np.arange(len(gfp)) / info["sfreq"]
    else:
        times = np.arange(len(gfp))
    # Correct lengths so `segmentation` and `gfp` can be overlaid sample-wise
    if len(segmentation) > len(gfp):
        segmentation = segmentation[0 : len(gfp)]
    if len(segmentation) < len(gfp):
        gfp = gfp[0 : len(segmentation)]
    if epoch is None:
        epoch = (0, len(gfp))
    # One color per microstate. `plt.cm.get_cmap` was removed in
    # Matplotlib 3.9, so prefer the colormap-registry API with a fallback.
    try:
        cmap = matplotlib.colormaps["plasma"].resampled(n)
    except AttributeError:
        cmap = plt.cm.get_cmap("plasma", n)
    # Plot the GFP line above the area
    ax["GFP"].plot(
        times[epoch[0] : epoch[1]], gfp[epoch[0] : epoch[1]], color="black", linewidth=0.5
    )
    # Fill the area under the GFP curve, colored by the active microstate
    for state, color in zip(range(n), cmap.colors):
        ax["GFP"].fill_between(
            times[epoch[0] : epoch[1]],
            gfp[epoch[0] : epoch[1]],
            color=color,
            where=(segmentation == state)[epoch[0] : epoch[1]],
        )
    # Create legend (a colorbar mapping each color to its microstate index)
    norm = matplotlib.colors.Normalize(vmin=-0.5, vmax=n - 0.5)
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    fig.colorbar(sm, ax=ax["GFP"])
    ax["GFP"].set_yticks([])
    if info is not None and "sfreq" in info:
        ax["GFP"].set_xlabel("Time (s)")
    else:
        ax["GFP"].set_xlabel("Sample")
    ax["GFP"].set_ylabel("Global Field Power (GFP)")
    ax["GFP"].set_title("Microstates Sequence")
    # BUG FIX: the figure promised by the docstring was never returned
    return fig
| [
"matplotlib.colors.Normalize",
"matplotlib.pyplot.cm.ScalarMappable",
"numpy.arange",
"matplotlib.pyplot.cm.get_cmap",
"mne.viz.plot_topomap"
] | [((3211, 3239), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""plasma"""', 'n'], {}), "('plasma', n)\n", (3226, 3239), True, 'import matplotlib.pyplot as plt\n'), ((3705, 3757), 'matplotlib.colors.Normalize', 'matplotlib.colors.Normalize', ([], {'vmin': '(-0.5)', 'vmax': '(n - 0.5)'}), '(vmin=-0.5, vmax=n - 0.5)\n', (3732, 3757), False, 'import matplotlib\n'), ((3767, 3810), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (3788, 3810), True, 'import matplotlib.pyplot as plt\n'), ((2623, 2678), 'mne.viz.plot_topomap', 'mne.viz.plot_topomap', (['map', 'info'], {'axes': 'ax[i]', 'show': '(False)'}), '(map, info, axes=ax[i], show=False)\n', (2643, 2678), False, 'import mne\n'), ((2457, 2469), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2466, 2469), True, 'import numpy as np\n')] |
"""
The purpose of this file is to demonstrate how one might write
naive code to do k-nearest neighbors by manually computing the
distances from a point to a collection of points and then using
argsort to find the indices of the closest points in the collection
"""
import matplotlib.pyplot as plt
import numpy as np
# Make 2 clusters. The first cluster is in the first
# 100 rows, the second cluster is in the next 100 rows
# centered at an offset of (10, 10)
N = 100
X = np.random.randn(N*2, 2)
X[100::, :] += np.array([10, 10])
q = np.array([3, 3]) # Query point
# How far is the query point from every other point
distances = np.zeros(N*2)
for i in range(N*2):
x = X[i, :] #Point under consideration is in the ith row of X
distances[i] = np.sqrt(np.sum((x-q)**2))
# Find the nearest neighbor indices by using argsort
n_neighbors = 10
neighbors = np.argsort(distances)[0:n_neighbors]
plt.figure(figsize=(8,8))
plt.scatter(X[:, 0], X[:, 1])
plt.scatter(q[0], q[0], 40, marker='x')
# Plot ten nearest neighbors
print(neighbors)
plt.scatter(X[neighbors, 0], X[neighbors, 1], 100, marker='*')
plt.show() | [
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.random.randn",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.array"
] | [((477, 502), 'numpy.random.randn', 'np.random.randn', (['(N * 2)', '(2)'], {}), '(N * 2, 2)\n', (492, 502), True, 'import numpy as np\n'), ((516, 534), 'numpy.array', 'np.array', (['[10, 10]'], {}), '([10, 10])\n', (524, 534), True, 'import numpy as np\n'), ((543, 559), 'numpy.array', 'np.array', (['[3, 3]'], {}), '([3, 3])\n', (551, 559), True, 'import numpy as np\n'), ((639, 654), 'numpy.zeros', 'np.zeros', (['(N * 2)'], {}), '(N * 2)\n', (647, 654), True, 'import numpy as np\n'), ((907, 933), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (917, 933), True, 'import matplotlib.pyplot as plt\n'), ((933, 962), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {}), '(X[:, 0], X[:, 1])\n', (944, 962), True, 'import matplotlib.pyplot as plt\n'), ((963, 1002), 'matplotlib.pyplot.scatter', 'plt.scatter', (['q[0]', 'q[0]', '(40)'], {'marker': '"""x"""'}), "(q[0], q[0], 40, marker='x')\n", (974, 1002), True, 'import matplotlib.pyplot as plt\n'), ((1050, 1112), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[neighbors, 0]', 'X[neighbors, 1]', '(100)'], {'marker': '"""*"""'}), "(X[neighbors, 0], X[neighbors, 1], 100, marker='*')\n", (1061, 1112), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1123), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1121, 1123), True, 'import matplotlib.pyplot as plt\n'), ((868, 889), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (878, 889), True, 'import numpy as np\n'), ((767, 787), 'numpy.sum', 'np.sum', (['((x - q) ** 2)'], {}), '((x - q) ** 2)\n', (773, 787), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import math

x = np.arange(0, np.pi*2, 0.1)
y = np.sin(x)
yHalf = y/2

def rms(array):
    """Return the root-mean-square (RMS) of the values in ``array``.

    Parameters
    ----------
    array : np.ndarray
        Numeric values; must be non-empty.

    Returns
    -------
    float
        ``sqrt(mean(array**2))``.
    """
    # Vectorized equivalent of the original element-wise loop; numpy's
    # pairwise summation is also more accurate for long arrays.
    return float(np.sqrt(np.mean(np.square(array))))

# Combining two RMS values this way is only valid because `y` and `yHalf`
# have the same number of samples (each contributes equally to the mean),
# which is why it matches the RMS of the concatenation below.
combinedRMS = ((rms(y)**2.0 + rms(yHalf)**2.0)/2.0)**0.5
combinedRMS2 = rms(np.concatenate((y, yHalf), axis=None))

print(f"\n RMS of y: {rms(y)}")
print(f"RMS of yHalf: {rms(yHalf)}")
print(f"Twice RMS of yHalf: {rms(yHalf) * 2.0}")
print(f"Combined RMS: {combinedRMS}")
print(f"Combined RMS2: {combinedRMS2} \n")

# plt.plot(x, y)
# plt.plot(x, yHalf)
# plt.show()
| [
"numpy.sin",
"numpy.arange",
"numpy.concatenate"
] | [((68, 96), 'numpy.arange', 'np.arange', (['(0)', '(np.pi * 2)', '(0.1)'], {}), '(0, np.pi * 2, 0.1)\n', (77, 96), True, 'import numpy as np\n'), ((100, 109), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (106, 109), True, 'import numpy as np\n'), ((360, 397), 'numpy.concatenate', 'np.concatenate', (['(y, yHalf)'], {'axis': 'None'}), '((y, yHalf), axis=None)\n', (374, 397), True, 'import numpy as np\n')] |
"""Genetic Algorithm.
"""
import copy
import numpy as np
import opytimizer.math.distribution as d
import opytimizer.math.general as g
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.exception as e
import opytimizer.utils.logging as l
from opytimizer.core import Optimizer
logger = l.get_logger(__name__)
class GA(Optimizer):
"""An GA class, inherited from Optimizer.
This is the designed class to define GA-related
variables and methods.
References:
<NAME>. An introduction to genetic algorithms. MIT Press (1998).
"""
def __init__(self, params=None):
"""Initialization method.
Args:
params (dict): Contains key-value parameters to the meta-heuristics.
"""
# Overrides its parent class with the receiving params
super(GA, self).__init__()
# Probability of selection
self.p_selection = 0.75
# Probability of mutation
self.p_mutation = 0.25
# Probability of crossover
self.p_crossover = 0.5
# Builds the class
self.build(params)
logger.info('Class overrided.')
@property
def p_selection(self):
"""float: Probability of selection.
"""
return self._p_selection
@p_selection.setter
def p_selection(self, p_selection):
if not isinstance(p_selection, (float, int)):
raise e.TypeError('`p_selection` should be a float or integer')
if p_selection < 0 or p_selection > 1:
raise e.ValueError('`p_selection` should be between 0 and 1')
self._p_selection = p_selection
@property
def p_mutation(self):
"""float: Probability of mutation.
"""
return self._p_mutation
@p_mutation.setter
def p_mutation(self, p_mutation):
if not isinstance(p_mutation, (float, int)):
raise e.TypeError('`p_mutation` should be a float or integer')
if p_mutation < 0 or p_mutation > 1:
raise e.ValueError('`p_mutation` should be between 0 and 1')
self._p_mutation = p_mutation
@property
def p_crossover(self):
"""float: Probability of crossover.
"""
return self._p_crossover
@p_crossover.setter
def p_crossover(self, p_crossover):
if not isinstance(p_crossover, (float, int)):
raise e.TypeError('`p_crossover` should be a float or integer')
if p_crossover < 0 or p_crossover > 1:
raise e.ValueError('`p_crossover` should be between 0 and 1')
self._p_crossover = p_crossover
def _roulette_selection(self, n_agents, fitness):
"""Performs a roulette selection on the population (p. 8).
Args:
n_agents (int): Number of agents allowed in the space.
fitness (list): A fitness list of every agent.
Returns:
The selected indexes of the population.
"""
# Calculates the number of selected individuals
n_individuals = int(n_agents * self.p_selection)
# Checks if `n_individuals` is an odd number
if n_individuals % 2 != 0:
# If it is, increase it by one
n_individuals += 1
# Defines the maximum fitness of current generation
max_fitness = np.max(fitness)
# Re-arrange the list of fitness by inverting it
# Note that we apply a trick due to it being designed for minimization
# f'(x) = f_max - f(x)
inv_fitness = [max_fitness - fit + c.EPSILON for fit in fitness]
# Calculates the total inverted fitness
total_fitness = np.sum(inv_fitness)
# Calculates the probability of each inverted fitness
probs = [fit / total_fitness for fit in inv_fitness]
# Performs the selection process
selected = d.generate_choice_distribution(n_agents, probs, n_individuals)
return selected
def _crossover(self, father, mother):
"""Performs the crossover between a pair of parents (p. 8).
Args:
father (Agent): Father to produce the offsprings.
mother (Agent): Mother to produce the offsprings.
Returns:
Two generated offsprings based on parents.
"""
# Makes a deep copy of father and mother
alpha, beta = copy.deepcopy(father), copy.deepcopy(mother)
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than crossover probability
if r1 < self.p_crossover:
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# Calculates the crossover based on a linear combination between father and mother
alpha.position = r2 * father.position + (1 - r2) * mother.position
# Calculates the crossover based on a linear combination between father and mother
beta.position = r2 * mother.position + (1 - r2) * father.position
return alpha, beta
def _mutation(self, alpha, beta):
"""Performs the mutation over offsprings (p. 8).
Args:
alpha (Agent): First offspring.
beta (Agent): Second offspring.
Returns:
Two mutated offsprings.
"""
# For every decision variable
for j in range(alpha.n_variables):
# Generates a uniform random number
r1 = r.generate_uniform_random_number()
# If random number is smaller than probability of mutation
if r1 < self.p_mutation:
# Mutates the offspring
alpha.position[j] += r.generate_gaussian_random_number()
# Generates another uniform random number
r2 = r.generate_uniform_random_number()
# If random number is smaller than probability of mutation
if r2 < self.p_mutation:
# Mutates the offspring
beta.position[j] += r.generate_gaussian_random_number()
return alpha, beta
def update(self, space, function):
"""Wraps Genetic Algorithm over all agents and variables.
Args:
space (Space): Space containing agents and update-related information.
function (Function): A Function object that will be used as the objective function.
"""
# Creates a list to hold the new population
new_agents = []
# Retrieves the number of agents
n_agents = len(space.agents)
# Calculates a list of fitness from every agent
fitness = [agent.fit + c.EPSILON for agent in space.agents]
# Selects the parents
selected = self._roulette_selection(n_agents, fitness)
# For every pair of selected parents
for s in g.n_wise(selected):
# Performs the crossover and mutation
alpha, beta = self._crossover(space.agents[s[0]], space.agents[s[1]])
alpha, beta = self._mutation(alpha, beta)
# Checking `alpha` and `beta` limits
alpha.clip_by_bound()
beta.clip_by_bound()
# Calculates new fitness for `alpha` and `beta`
alpha.fit = function(alpha.position)
beta.fit = function(beta.position)
# Appends the mutated agents to the children
new_agents.extend([alpha, beta])
# Joins both populations, sort agents and gathers best `n_agents`
space.agents += new_agents
space.agents.sort(key=lambda x: x.fit)
space.agents = space.agents[:n_agents]
| [
"opytimizer.math.random.generate_uniform_random_number",
"copy.deepcopy",
"numpy.sum",
"numpy.max",
"opytimizer.utils.logging.get_logger",
"opytimizer.math.random.generate_gaussian_random_number",
"opytimizer.utils.exception.ValueError",
"opytimizer.math.distribution.generate_choice_distribution",
"... | [((334, 356), 'opytimizer.utils.logging.get_logger', 'l.get_logger', (['__name__'], {}), '(__name__)\n', (346, 356), True, 'import opytimizer.utils.logging as l\n'), ((3345, 3360), 'numpy.max', 'np.max', (['fitness'], {}), '(fitness)\n', (3351, 3360), True, 'import numpy as np\n'), ((3675, 3694), 'numpy.sum', 'np.sum', (['inv_fitness'], {}), '(inv_fitness)\n', (3681, 3694), True, 'import numpy as np\n'), ((3880, 3942), 'opytimizer.math.distribution.generate_choice_distribution', 'd.generate_choice_distribution', (['n_agents', 'probs', 'n_individuals'], {}), '(n_agents, probs, n_individuals)\n', (3910, 3942), True, 'import opytimizer.math.distribution as d\n'), ((4479, 4513), 'opytimizer.math.random.generate_uniform_random_number', 'r.generate_uniform_random_number', ([], {}), '()\n', (4511, 4513), True, 'import opytimizer.math.random as r\n'), ((6874, 6892), 'opytimizer.math.general.n_wise', 'g.n_wise', (['selected'], {}), '(selected)\n', (6882, 6892), True, 'import opytimizer.math.general as g\n'), ((1452, 1509), 'opytimizer.utils.exception.TypeError', 'e.TypeError', (['"""`p_selection` should be a float or integer"""'], {}), "('`p_selection` should be a float or integer')\n", (1463, 1509), True, 'import opytimizer.utils.exception as e\n'), ((1575, 1630), 'opytimizer.utils.exception.ValueError', 'e.ValueError', (['"""`p_selection` should be between 0 and 1"""'], {}), "('`p_selection` should be between 0 and 1')\n", (1587, 1630), True, 'import opytimizer.utils.exception as e\n'), ((1935, 1991), 'opytimizer.utils.exception.TypeError', 'e.TypeError', (['"""`p_mutation` should be a float or integer"""'], {}), "('`p_mutation` should be a float or integer')\n", (1946, 1991), True, 'import opytimizer.utils.exception as e\n'), ((2055, 2109), 'opytimizer.utils.exception.ValueError', 'e.ValueError', (['"""`p_mutation` should be between 0 and 1"""'], {}), "('`p_mutation` should be between 0 and 1')\n", (2067, 2109), True, 'import opytimizer.utils.exception as e\n'), 
((2419, 2476), 'opytimizer.utils.exception.TypeError', 'e.TypeError', (['"""`p_crossover` should be a float or integer"""'], {}), "('`p_crossover` should be a float or integer')\n", (2430, 2476), True, 'import opytimizer.utils.exception as e\n'), ((2542, 2597), 'opytimizer.utils.exception.ValueError', 'e.ValueError', (['"""`p_crossover` should be between 0 and 1"""'], {}), "('`p_crossover` should be between 0 and 1')\n", (2554, 2597), True, 'import opytimizer.utils.exception as e\n'), ((4376, 4397), 'copy.deepcopy', 'copy.deepcopy', (['father'], {}), '(father)\n', (4389, 4397), False, 'import copy\n'), ((4399, 4420), 'copy.deepcopy', 'copy.deepcopy', (['mother'], {}), '(mother)\n', (4412, 4420), False, 'import copy\n'), ((4685, 4719), 'opytimizer.math.random.generate_uniform_random_number', 'r.generate_uniform_random_number', ([], {}), '()\n', (4717, 4719), True, 'import opytimizer.math.random as r\n'), ((5510, 5544), 'opytimizer.math.random.generate_uniform_random_number', 'r.generate_uniform_random_number', ([], {}), '()\n', (5542, 5544), True, 'import opytimizer.math.random as r\n'), ((5839, 5873), 'opytimizer.math.random.generate_uniform_random_number', 'r.generate_uniform_random_number', ([], {}), '()\n', (5871, 5873), True, 'import opytimizer.math.random as r\n'), ((5731, 5766), 'opytimizer.math.random.generate_gaussian_random_number', 'r.generate_gaussian_random_number', ([], {}), '()\n', (5764, 5766), True, 'import opytimizer.math.random as r\n'), ((6059, 6094), 'opytimizer.math.random.generate_gaussian_random_number', 'r.generate_gaussian_random_number', ([], {}), '()\n', (6092, 6094), True, 'import opytimizer.math.random as r\n')] |
import os
import time
import can
import math
import numpy as np
from constants import *
# Only accept frames carrying the driverless command ID
bus_filters = [{"can_id": CAN_DRIVERLESS_ID, "can_mask": 0xfff, "extended": False}]
# Virtual CAN interface used by the simulator
bus = can.interface.Bus(bustype='socketcan', channel='vcan0', bitrate=500000, receive_own_messages=False)
bus.set_filters(bus_filters)
# Simulator state shared across the functions below
steering_target = 0
steering_real = 0
motor_rpm = MOTOR_RPM
# Car pose: [x, y, theta (rad), steer (rad)] — names from update_car's unpacking
car_real_pos = [-9,-5,math.radians(90),0]
#car_real_pos = [0,0,math.radians(0),0]
# Timestamp of the previous kinematics update
t0_update = time.time()
def update_car():
    """Advance the simulated car pose by one kinematic step.

    Integrates a bicycle-style model over the wall-clock time elapsed since
    the previous call, then stores the pose with gaussian measurement noise.
    Fixed: removed the unused `global steering_real` declaration and the
    needless `global motor_rpm` (reading a global requires no declaration).
    """
    global car_real_pos
    global t0_update
    x, y, theta, steer = car_real_pos
    # Variable-step integration: time elapsed since the previous update
    delta_t = time.time() - t0_update
    speed = (RPM_TO_MS * motor_rpm)
    x += speed * math.cos(theta) * delta_t
    y += speed * math.sin(theta) * delta_t
    # NOTE(review): confirm the -steer sign convention matches the sensor
    theta += speed / CAR_LENGHT * math.tan(-steer) * delta_t
    # Store the new pose with simulated position/heading noise
    car_real_pos[0] = float(x + np.random.normal(0, SIMULATION_POS_ERROR, 1))
    car_real_pos[1] = float(y + np.random.normal(0, SIMULATION_POS_ERROR, 1))
    car_real_pos[2] = float(theta + np.random.normal(0, SIMULATION_THETA_ERROR, 1))
    t0_update = time.time()
def send_can(motor_rpm, real_steer):
    """Publish the simulated motor-RPM and steering-sensor CAN frames.

    Args:
        motor_rpm (int): Motor speed in RPM, packed big-endian into bytes 4-7.
        real_steer (float): True steering angle, mapped onto a 16-bit scale.
    """
    rpm_hex = motor_rpm.to_bytes(4, 'big')
    msg_VCU_Info_1 = can.Message(arbitration_id=CAN_MOTOR_RPM_ID,
                  data=[0, 0, 0, 0, rpm_hex[0], rpm_hex[1], rpm_hex[2], rpm_hex[3]],
                  is_extended_id=False)
    # Simulate the steering sensor: clamp to the physical range first.
    # Fixed (was the `TODO FIX THIS`): the lower clamp previously used
    # `real_steer` again, which discarded the upper clamp entirely.
    steer_sensor = min(STEER_MAX, real_steer)
    steer_sensor = max(STEER_MIN, steer_sensor)
    # Shift by the sensor offset, wrapping negative angles into [0, 360)
    steer_sensor -= STEERING_OFFSET
    if steer_sensor < -STEERING_OFFSET:
        steer_sensor = 360 + steer_sensor
    steer_sensor += STEERING_OFFSET
    # Map degrees onto the 16-bit sensor scale
    steer_sensor *= (65535/360)
    steer_hex = int(steer_sensor).to_bytes(2, 'big')
    msg_steering = can.Message(arbitration_id=CAN_STEERING_ID,
                  data=[steer_hex[0], steer_hex[1], 0, 0, 0, 0, 0, 0],
                  is_extended_id=False)
    bus.send(msg_VCU_Info_1)
    bus.send(msg_steering)
def receive_can():
    """Poll the bus and refresh `steering_target` from driverless frames."""
    global steering_target
    msg = bus.recv(0.1)
    # Nothing arrived within the timeout
    if msg is None:
        return
    if msg.arbitration_id == CAN_DRIVERLESS_ID:
        # Byte 7 carries the target encoded with a +128 offset
        steering_target = int(msg.data[7]) - 128
# Timestamp of the previous steering-control step
t0_control = time.time()
def control_steer():
    """Move the simulated steering rack toward `steering_target`.

    Applies a dead zone and a bounded slew rate (STEER_SPEED), clamps the
    rack to its mechanical limits and mirrors the resulting wheel angle
    into `car_real_pos[3]` in radians.
    """
    global t0_control
    global steering_real
    global steering_target
    global car_real_pos
    delta_time = time.time() - t0_control
    t0_control = time.time()
    err = steering_real - steering_target
    # Only actuate outside the dead zone, at a rate-limited step
    if abs(err) > STEER_DEAD_ZONE:
        step = STEER_SPEED * delta_time
        steering_real += -step if err > 0 else step
    # Mechanical limits of the steering rack
    steering_real = max(steering_real, STEER_MIN)
    steering_real = min(steering_real, STEER_MAX)
    # Convert rack angle to wheel angle, clamp and store in radians
    wheel = STEER_TO_WHEEL * steering_real
    wheel = min(wheel, STEER_WHEEL_MAX)
    wheel = max(wheel, STEER_WHEEL_MIN)
    car_real_pos[3] = math.radians(wheel)
def run_forever():
    """Main simulation loop: transmit state, ingest commands, step the model.

    Never returns. Fixed: removed the unused `global car_real_pos`
    declaration and the non-idiomatic `while(True)` parentheses.
    """
    while True:
        send_can(motor_rpm, steering_real)
        receive_can()
        control_steer()
        update_car()
# Entry point: the "test" here just runs the simulator loop
if __name__ == "__main__":
    print("Running tests")
    run_forever()
    print("Tests ended")  # unreachable: run_forever loops forever
| [
"math.radians",
"math.tan",
"math.sin",
"time.time",
"can.interface.Bus",
"can.Message",
"math.cos",
"numpy.random.normal"
] | [((178, 281), 'can.interface.Bus', 'can.interface.Bus', ([], {'bustype': '"""socketcan"""', 'channel': '"""vcan0"""', 'bitrate': '(500000)', 'receive_own_messages': '(False)'}), "(bustype='socketcan', channel='vcan0', bitrate=500000,\n receive_own_messages=False)\n", (195, 281), False, 'import can\n'), ((465, 476), 'time.time', 'time.time', ([], {}), '()\n', (474, 476), False, 'import time\n'), ((2314, 2325), 'time.time', 'time.time', ([], {}), '()\n', (2323, 2325), False, 'import time\n'), ((391, 407), 'math.radians', 'math.radians', (['(90)'], {}), '(90)\n', (403, 407), False, 'import math\n'), ((1088, 1099), 'time.time', 'time.time', ([], {}), '()\n', (1097, 1099), False, 'import time\n'), ((1234, 1371), 'can.Message', 'can.Message', ([], {'arbitration_id': 'CAN_MOTOR_RPM_ID', 'data': '[0, 0, 0, 0, rpm_hex[0], rpm_hex[1], rpm_hex[2], rpm_hex[3]]', 'is_extended_id': '(False)'}), '(arbitration_id=CAN_MOTOR_RPM_ID, data=[0, 0, 0, 0, rpm_hex[0],\n rpm_hex[1], rpm_hex[2], rpm_hex[3]], is_extended_id=False)\n', (1245, 1371), False, 'import can\n'), ((1840, 1963), 'can.Message', 'can.Message', ([], {'arbitration_id': 'CAN_STEERING_ID', 'data': '[steer_hex[0], steer_hex[1], 0, 0, 0, 0, 0, 0]', 'is_extended_id': '(False)'}), '(arbitration_id=CAN_STEERING_ID, data=[steer_hex[0], steer_hex[1\n ], 0, 0, 0, 0, 0, 0], is_extended_id=False)\n', (1851, 1963), False, 'import can\n'), ((2504, 2515), 'time.time', 'time.time', ([], {}), '()\n', (2513, 2515), False, 'import time\n'), ((3033, 3062), 'math.radians', 'math.radians', (['car_real_pos[3]'], {}), '(car_real_pos[3])\n', (3045, 3062), False, 'import math\n'), ((635, 646), 'time.time', 'time.time', ([], {}), '()\n', (644, 646), False, 'import time\n'), ((2461, 2472), 'time.time', 'time.time', ([], {}), '()\n', (2470, 2472), False, 'import time\n'), ((710, 725), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (718, 725), False, 'import math\n'), ((753, 768), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (761, 
768), False, 'import math\n'), ((811, 827), 'math.tan', 'math.tan', (['(-steer)'], {}), '(-steer)\n', (819, 827), False, 'import math\n'), ((870, 914), 'numpy.random.normal', 'np.random.normal', (['(0)', 'SIMULATION_POS_ERROR', '(1)'], {}), '(0, SIMULATION_POS_ERROR, 1)\n', (886, 914), True, 'import numpy as np\n'), ((946, 990), 'numpy.random.normal', 'np.random.normal', (['(0)', 'SIMULATION_POS_ERROR', '(1)'], {}), '(0, SIMULATION_POS_ERROR, 1)\n', (962, 990), True, 'import numpy as np\n'), ((1026, 1072), 'numpy.random.normal', 'np.random.normal', (['(0)', 'SIMULATION_THETA_ERROR', '(1)'], {}), '(0, SIMULATION_THETA_ERROR, 1)\n', (1042, 1072), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
This example demonstrates some of the plotting items available in pyqtgraph.
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
app = pg.mkQApp("InfiniteLine Example")
win = pg.GraphicsLayoutWidget(show=True, title="Plotting items examples")
win.resize(1000,600)
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
# Create a plot with some random data
p1 = win.addPlot(title="Plot Items example", y=np.random.normal(size=100, scale=10), pen=0.5)
p1.setYRange(-40, 40)
# Add three infinite lines with labels
# inf1: draggable vertical line (angle=90) whose label shows its x position
inf1 = pg.InfiniteLine(movable=True, angle=90, label='x={value:0.2f}',
               labelOpts={'position':0.1, 'color': (200,200,100), 'fill': (200,200,200,50), 'movable': True})
# inf2: horizontal line constrained to y in [-20, 20], with hover feedback
inf2 = pg.InfiniteLine(movable=True, angle=0, pen=(0, 0, 200), bounds = [-20, 20], hoverPen=(0,200,0), label='y={value:0.2f}mm',
               labelOpts={'color': (200,0,0), 'movable': True, 'fill': (0, 0, 200, 100)})
# inf3: diagonal line with a label rotated along the line's axis
inf3 = pg.InfiniteLine(movable=True, angle=45, pen='g', label='diagonal',
               labelOpts={'rotateAxis': [1, 0], 'fill': (0, 200, 0, 100), 'movable': True})
inf1.setPos([2,2])
p1.addItem(inf1)
p1.addItem(inf2)
p1.addItem(inf3)
# targetItem1: default crosshair marker with an auto-generated position label
targetItem1 = pg.TargetItem(
    label=True,
    symbol="crosshair",
    labelOpts={
        "angle": 0
    }
)
# targetItem2: star marker whose label formats the vertical coordinate
targetItem2 = pg.TargetItem(
    pos=(30, 5),
    size=20,
    label="vert={1:0.2f}",
    symbol="star",
    pen="#F4511E",
    labelOpts={
        "angle": 45,
        "offset": QtCore.QPoint(15, 15)
    }
)
# targetItem3: 'x' marker with a static label anchored at its center
targetItem3 = pg.TargetItem(
    pos=(10, 10),
    size=10,
    label="Third Label",
    symbol="x",
    pen="#00ACC1",
    labelOpts={
        "anchor": QtCore.QPointF(0.5, 0.5),
        "offset": QtCore.QPointF(30, 0),
        "color": "#558B2F",
        "rotateAxis": (0, 1)
    }
)
def callableFunction(x, y):
    """Label formatter: report the squared coordinates of the target."""
    return "Square Values: ({:.4f}, {:.4f})".format(x ** 2, y ** 2)
# targetItem4: label text produced by the callable defined just above
targetItem4 = pg.TargetItem(
    pos=(10, -10),
    label=callableFunction
)
p1.addItem(targetItem1)
p1.addItem(targetItem2)
p1.addItem(targetItem3)
p1.addItem(targetItem4)
# Add a linear region with a label
lr = pg.LinearRegionItem(values=[70, 80])
p1.addItem(lr)
label = pg.InfLineLabel(lr.lines[1], "region 1", position=0.95, rotateAxis=(1,0), anchor=(1, 1))
# Start the Qt event loop when executed as a script
if __name__ == '__main__':
    pg.mkQApp().exec_()
| [
"pyqtgraph.TargetItem",
"pyqtgraph.mkQApp",
"pyqtgraph.Qt.QtCore.QPoint",
"numpy.random.normal",
"pyqtgraph.LinearRegionItem",
"pyqtgraph.setConfigOptions",
"pyqtgraph.InfLineLabel",
"pyqtgraph.Qt.QtCore.QPointF",
"pyqtgraph.InfiniteLine",
"pyqtgraph.GraphicsLayoutWidget"
] | [((283, 316), 'pyqtgraph.mkQApp', 'pg.mkQApp', (['"""InfiniteLine Example"""'], {}), "('InfiniteLine Example')\n", (292, 316), True, 'import pyqtgraph as pg\n'), ((323, 390), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {'show': '(True)', 'title': '"""Plotting items examples"""'}), "(show=True, title='Plotting items examples')\n", (346, 390), True, 'import pyqtgraph as pg\n'), ((454, 489), 'pyqtgraph.setConfigOptions', 'pg.setConfigOptions', ([], {'antialias': '(True)'}), '(antialias=True)\n', (473, 489), True, 'import pyqtgraph as pg\n'), ((692, 865), 'pyqtgraph.InfiniteLine', 'pg.InfiniteLine', ([], {'movable': '(True)', 'angle': '(90)', 'label': '"""x={value:0.2f}"""', 'labelOpts': "{'position': 0.1, 'color': (200, 200, 100), 'fill': (200, 200, 200, 50),\n 'movable': True}"}), "(movable=True, angle=90, label='x={value:0.2f}', labelOpts={\n 'position': 0.1, 'color': (200, 200, 100), 'fill': (200, 200, 200, 50),\n 'movable': True})\n", (707, 865), True, 'import pyqtgraph as pg\n'), ((882, 1089), 'pyqtgraph.InfiniteLine', 'pg.InfiniteLine', ([], {'movable': '(True)', 'angle': '(0)', 'pen': '(0, 0, 200)', 'bounds': '[-20, 20]', 'hoverPen': '(0, 200, 0)', 'label': '"""y={value:0.2f}mm"""', 'labelOpts': "{'color': (200, 0, 0), 'movable': True, 'fill': (0, 0, 200, 100)}"}), "(movable=True, angle=0, pen=(0, 0, 200), bounds=[-20, 20],\n hoverPen=(0, 200, 0), label='y={value:0.2f}mm', labelOpts={'color': (\n 200, 0, 0), 'movable': True, 'fill': (0, 0, 200, 100)})\n", (897, 1089), True, 'import pyqtgraph as pg\n'), ((1110, 1262), 'pyqtgraph.InfiniteLine', 'pg.InfiniteLine', ([], {'movable': '(True)', 'angle': '(45)', 'pen': '"""g"""', 'label': '"""diagonal"""', 'labelOpts': "{'rotateAxis': [1, 0], 'fill': (0, 200, 0, 100), 'movable': True}"}), "(movable=True, angle=45, pen='g', label='diagonal',\n labelOpts={'rotateAxis': [1, 0], 'fill': (0, 200, 0, 100), 'movable': True}\n )\n", (1125, 1262), True, 'import pyqtgraph as pg\n'), ((1362, 1431), 
'pyqtgraph.TargetItem', 'pg.TargetItem', ([], {'label': '(True)', 'symbol': '"""crosshair"""', 'labelOpts': "{'angle': 0}"}), "(label=True, symbol='crosshair', labelOpts={'angle': 0})\n", (1375, 1431), True, 'import pyqtgraph as pg\n'), ((2056, 2108), 'pyqtgraph.TargetItem', 'pg.TargetItem', ([], {'pos': '(10, -10)', 'label': 'callableFunction'}), '(pos=(10, -10), label=callableFunction)\n', (2069, 2108), True, 'import pyqtgraph as pg\n'), ((2257, 2293), 'pyqtgraph.LinearRegionItem', 'pg.LinearRegionItem', ([], {'values': '[70, 80]'}), '(values=[70, 80])\n', (2276, 2293), True, 'import pyqtgraph as pg\n'), ((2317, 2410), 'pyqtgraph.InfLineLabel', 'pg.InfLineLabel', (['lr.lines[1]', '"""region 1"""'], {'position': '(0.95)', 'rotateAxis': '(1, 0)', 'anchor': '(1, 1)'}), "(lr.lines[1], 'region 1', position=0.95, rotateAxis=(1, 0),\n anchor=(1, 1))\n", (2332, 2410), True, 'import pyqtgraph as pg\n'), ((576, 612), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)', 'scale': '(10)'}), '(size=100, scale=10)\n', (592, 612), True, 'import numpy as np\n'), ((1640, 1661), 'pyqtgraph.Qt.QtCore.QPoint', 'QtCore.QPoint', (['(15)', '(15)'], {}), '(15, 15)\n', (1653, 1661), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1826, 1850), 'pyqtgraph.Qt.QtCore.QPointF', 'QtCore.QPointF', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (1840, 1850), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((1870, 1891), 'pyqtgraph.Qt.QtCore.QPointF', 'QtCore.QPointF', (['(30)', '(0)'], {}), '(30, 0)\n', (1884, 1891), False, 'from pyqtgraph.Qt import QtGui, QtCore\n'), ((2438, 2449), 'pyqtgraph.mkQApp', 'pg.mkQApp', ([], {}), '()\n', (2447, 2449), True, 'import pyqtgraph as pg\n')] |
import numpy as np

# Minesweeper-style board: 'X' marks mines, '?' marks cells to be numbered
grid = [['X', 'X', '?'], ['X', '?', 'X'], ['X', '?', '?']]
# Convert list of lists to a numpy array of single characters
arr = np.array(grid)
n_rows, n_cols = arr.shape
# Pad with a one-cell border so every cell has a full 3x3 neighbourhood
arr_pad = np.pad(arr, ((1, 1), (1, 1)), 'constant')
# Mark unknown cells with the placeholder '0' (the string dtype coerces int 0)
arr_pad[arr_pad == '?'] = 0
# Replace each placeholder with the count of adjacent 'X' cells.
# Fixed: the loops previously used len(grid) for BOTH axes, which only
# works for square boards; the real shape supports rectangular boards too.
for i in range(1, n_rows + 1):
    for j in range(1, n_cols + 1):
        if arr_pad[i][j] == '0':
            count = 0
            for m in (i - 1, i, i + 1):
                for k in (j - 1, j, j + 1):
                    if arr_pad[m][k] == 'X':
                        count += 1
            arr_pad[i][j] = count
# Print the interior (un-padded) board
print(arr_pad[1:n_rows + 1, 1:n_cols + 1])
| [
"numpy.pad",
"numpy.array"
] | [((127, 141), 'numpy.array', 'np.array', (['grid'], {}), '(grid)\n', (135, 141), True, 'import numpy as np\n'), ((168, 209), 'numpy.pad', 'np.pad', (['arr', '((1, 1), (1, 1))', '"""constant"""'], {}), "(arr, ((1, 1), (1, 1)), 'constant')\n", (174, 209), True, 'import numpy as np\n')] |
from cmath import exp
import numpy as np
import pandas as pd
from itertools import product
from sklearn.model_selection import train_test_split
def eta_sample(n):
    """Draw `n` treatment-noise samples from Uniform(-1, 1)."""
    return np.random.uniform(low=-1, high=1, size=n)
def epsilon_sample(n):
    """Draw `n` outcome-noise samples from Uniform(-1, 1)."""
    return np.random.uniform(low=-1, high=1, size=n)
def exp_te(x):
    """Exponential treatment-effect function: exp(2 * x[0])."""
    return np.exp(x[0] * 2)
def ln_te(x):
    """Logarithmic treatment-effect function: log(1 + x[0])."""
    return np.log(x[0] + 1)
def build_data_frame(
    confounder_n,
    covariate_n,
    w, v, y, x, **kwargs,
):
    """Assemble a DataFrame from simulation arrays and split it.

    Args:
        confounder_n (int): Number of confounder columns (`w_i`).
        covariate_n (int): Number of covariate columns (`c_i`).
        w (ndarray): Confounders, 2-D with `confounder_n` columns.
        v (ndarray): Covariates, 2-D with `covariate_n` columns.
        y (ndarray): Outcome(s); 1-D, or 2-D for multiple outcomes.
        x (ndarray): Treatment(s); 1-D, or 2-D for multiple treatments.
        **kwargs: Extra 1-D arrays stored as additional columns.

    Returns:
        (train, val): A random train/validation split of the DataFrame.
    """
    data_dict = {}
    for i in range(confounder_n):
        data_dict[f'w_{i}'] = w[:, i]
    for i in range(covariate_n):
        data_dict[f'c_{i}'] = v[:, i]

    def multi_var(arr, name):
        # Store a 1-D array under `name`, a 2-D one as `name_0`, `name_1`, ...
        if len(arr.shape) == 1 or arr.shape[1] == 1:
            data_dict[name] = arr
        else:
            for i in range(arr.shape[1]):
                data_dict[f'{name}_{i}'] = arr[:, i]

    multi_var(x, 'treatment')
    multi_var(y, 'outcome')
    # Fixed: this loop previously rebound `v`, shadowing the covariate
    # parameter; distinct names avoid the foot-gun.
    for key, value in kwargs.items():
        data_dict[key] = value
    data = pd.DataFrame(data_dict)
    train, val = train_test_split(data)
    return train, val
def multi_continuous_treatment(
    n=6000,
    n_w=30,
    n_v=5,
    random_seed=2022,
):
    """Simulate data where one continuous treatment enters linearly and quadratically.

    The outcome is y = TE1 * t + TE2 * t**2 + confounder term + noise,
    with TE1 = exp(2 * c_0) and TE2 = log(1 + c_0) (see exp_te / ln_te).

    Args:
        n (int): Number of samples.
        n_w (int): Number of confounders.
        n_v (int): Number of covariates.
        random_seed (int): Seed for numpy's global RNG.

    Returns:
        (train, val, (data_test, expected_te1, expected_te2)): Train/validation
        DataFrames plus a 100-row covariate test grid with its true effects.
    """
    np.random.seed(random_seed)
    support_size = 5
    # Confounders that actually influence the outcome (and the treatment)
    support_Y = np.random.choice(
        np.arange(n_w), size=support_size, replace=False
    )
    coefs_Y = np.random.uniform(0, 1, size=support_size)
    # Treatment shares the outcome's support
    support_T = support_Y
    coefs_T = np.random.uniform(0, 1, size=support_size)
    W = np.random.normal(0, 1, size=(n, n_w))
    X = np.random.uniform(0, 1, size=(n, n_v))
    # True heterogeneous effects of the linear and quadratic terms
    TE1 = np.array([exp_te(x_i) for x_i in X])
    TE2 = np.array([ln_te(x_i) for x_i in X]).flatten()
    T = np.dot(W[:, support_T], coefs_T) + eta_sample(n)
    Y = TE1 * T + TE2 * T**2 + \
        np.dot(W[:, support_Y], coefs_Y) + epsilon_sample(n)
    T = T.reshape(-1, 1)
    # The treatment enters the model through both t and t**2
    x = np.concatenate((T, T**2), axis=1)
    train, val = build_data_frame(confounder_n=n_w,
                                  covariate_n=n_v,
                                  w=W, v=X, y=Y, x=x, te1=TE1, te2=TE2)
    # Test data: first covariate swept over [0, 1], the others random
    X_test = np.random.uniform(0, 1, size=(100, n_v))
    X_test[:, 0] = np.linspace(0, 1, 100)
    data_test_dic = {}
    for i in range(n_v):
        data_test_dic[f'c_{i}'] = X_test[:, i]
    data_test = pd.DataFrame(data_test_dic)
    # Ground-truth effect curves on the test grid
    expected_te1 = np.array([exp_te(x_i) for x_i in X_test])
    expected_te2 = np.array([ln_te(x_i) for x_i in X_test]).flatten()
    return train, val, (data_test, expected_te1, expected_te2)
def single_binary_treatment(
    n=1000,
    confounder_n=30,
    covariate_n=4,
    random_seed=2022,
):
    """Simulate data with a single binary treatment assigned by a logistic model.

    Args:
        n (int): Number of samples.
        confounder_n (int): Number of confounders.
        covariate_n (int): Number of covariates.
        random_seed (int): Seed for numpy's global RNG.

    Returns:
        (train, val, TE): Train/validation DataFrames and the true effects.
    """
    np.random.seed(random_seed)
    support_size = 5
    # Outcome support
    support_Y = np.random.choice(
        range(confounder_n), size=support_size, replace=False)
    coefs_Y = np.random.uniform(0, 1, size=support_size)
    # Treatment support (shared with the outcome)
    support_T = support_Y
    coefs_T = np.random.uniform(0, 1, size=support_size)
    # Generate controls, covariates, treatments and outcomes
    W = np.random.normal(0, 1, size=(n, confounder_n))
    V = np.random.uniform(0, 1, size=(n, covariate_n))
    # Heterogeneous treatment effects: TE = exp(2 * v_0)
    TE = np.array([exp_te(x_i) for x_i in V])
    # Define treatment: Bernoulli draw with sigmoid(confounder score) probability
    log_odds = np.dot(W[:, support_T], coefs_T) + eta_sample(n)
    T_sigmoid = 1/(1 + np.exp(-log_odds))
    x = np.array([np.random.binomial(1, p) for p in T_sigmoid])
    # Define the outcome
    Y = TE * x + np.dot(W[:, support_Y], coefs_Y) + epsilon_sample(n)
    train, val = build_data_frame(confounder_n=confounder_n,
                                  covariate_n=covariate_n,
                                  w=W, v=V, y=Y, x=x, TE=TE)
    return train, val, TE
def single_continuous_treatment(num=2000,
                                confounder_n=30,
                                covariate_n=1,
                                random_seed=2022,
                                data_frame=True):
    """Simulate data with a single continuous treatment.

    Args:
        num (int): Number of samples.
        confounder_n (int): Number of confounders.
        covariate_n (int): Number of covariates.
        random_seed (int): Seed for numpy's global RNG.
        data_frame (bool): If True, return train/validation DataFrames;
            otherwise return the raw simulation arrays.

    Returns:
        (train, val, TE) when `data_frame` is True, else (x, w, c, y, TE).
        Fixed: the `data_frame=False` path previously fell through and
        returned None; also dropped an `x_test` grid that was computed
        (via itertools.product) but never used or returned — its removal
        consumes no randomness, so RNG-seeded results are unchanged.
    """
    np.random.seed(random_seed)
    support_size = 5
    support_y = np.random.choice(
        np.arange(confounder_n), size=support_size, replace=False
    )
    coefs_y = np.random.uniform(0, 1, size=support_size)
    # Treatment shares the outcome's support
    support_t = support_y
    coefs_t = np.random.uniform(0, 1, size=support_size)
    w = np.random.normal(0, 1, size=(num, confounder_n))
    c = np.random.uniform(0, 1, size=(num, covariate_n))
    # True heterogeneous effect: TE = exp(2 * c_0)
    TE = np.array([exp_te(ci) for ci in c])
    x = np.dot(w[:, support_t], coefs_t) + eta_sample(num)
    y = TE * x + np.dot(w[:, support_y], coefs_y)\
        + epsilon_sample(num)
    if data_frame:
        train, val = build_data_frame(confounder_n=confounder_n,
                                      covariate_n=covariate_n,
                                      w=w, v=c, y=y, x=x, TE=TE)
        return train, val, TE
    return x, w, c, y, TE
def meaningless_discrete_dataset_(num, treatment_effct,
                                  confounder_n=2,
                                  w_var=5,
                                  eps=1e-4,
                                  data_frame=True,
                                  random_seed=2022,
                                  instrument=False):
    """Generate a dataset where the treatment and outcome share confounders
    while the relation between the treatment and outcome is linear.

    The treatment is an array of integers where each integer indicates the
    treatment group assigned to the corresponding example. The outcome is
    an array of floats, i.e., a continuous outcome.

    Parameters
    ----------
    num : int
        The number of examples in the dataset.
    treatment_effct : list
        Effect size attached to each treatment group.
    confounder_n : int
        The number of confounders of the treatment and outcome.
    w_var : float, optional. Defaults to 5.
        Scale of the confounder variance around its mean.
    eps : float, optional. Defaults to 1e-4.
        Noise level imposed on the data generating process.
    data_frame : bool, optional. Defaults to True.
        Return pandas.DataFrame if True.
    random_seed : int, optional. Defaults to 2022.
    instrument : bool, optional. Defaults to False.
        Add an instrument column to the dataset if True.
        NOTE(review): `z` is only ever set to None below — instrument
        generation looks unimplemented; confirm the intended behavior.

    Returns
    ----------
    pandas.DataFrame, optional.
        w_j's are confounders of outcome and treatment.
    """
    np.random.seed(random_seed)
    # Build treatment x which depends on the confounder w
    x_num = len(treatment_effct)
    # Each confounder gets its own randomly scaled variance
    w = [
        np.random.normal(0, w_var*np.random.random_sample(), size=(num, 1))
        for i in range(confounder_n)
    ]
    w = np.concatenate(tuple(w), axis=1)
    w_coef = np.random.rand(x_num, confounder_n)
    # Per-group scores: linear in the confounders plus small noise
    x = w.dot(w_coef.T) + np.random.normal(0, eps, size=(num, x_num))
    if instrument:
        # NOTE(review): placeholder only — `z` never receives real data
        z = None
    # Assign each example to its highest-scoring treatment group
    x = x.argmax(axis=1)
    x_one_hot = np.eye(x_num)[x]
    # Now we build the outcome y which depends on both x and w
    x_coef = np.random.randn(1, confounder_n)
    # Prepend the prescribed treatment effects to the confounder coefficients
    x_coef = np.concatenate(
        (np.array(treatment_effct).reshape(1, -1), x_coef), axis=1
    )
    x_ = np.concatenate((x_one_hot, w), axis=1)
    y = x_.dot(x_coef.T) + np.random.normal(0, eps, size=(num, 1))
    # Return the dataset
    if data_frame:
        data_dict = {}
        data_dict['treatment'] = x
        if instrument:
            data_dict['instrument'] = z
        for i, j in enumerate(w.T):
            data_dict[f'w_{i}'] = j
        data_dict['outcome'] = y.reshape(num,)
        data = pd.DataFrame(data_dict)
        return data
    else:
        if instrument:
            return (x, w, z, y)
        else:
            return (x, w, y)
def coupon_dataset(n_users, treatment_style='binary', with_income=False):
    """Simulate a coupon marketing dataset.

    Args:
        n_users (int): Number of simulated users.
        treatment_style (str): 'binary' dichotomizes the coupon variable.
        with_income (bool): Include an income column that confounds both
            coupon assignment and the spending amount.

    Returns:
        pandas.DataFrame with gender, coupon, amount, (income,) time_spent.
    """
    if with_income:
        income = np.random.normal(500, scale=15, size=n_users)
        gender = np.random.randint(0, 2, size=n_users)
        # Coupon propensity depends on gender and income, plus noise
        coupon = (gender * 20 + 110 + income / 50
                  + np.random.normal(scale=5, size=n_users))
        if treatment_style == 'binary':
            coupon = (coupon > 120).astype(int)
        # Spending is driven by the coupon, gender and income
        amount = (coupon * 150 + gender * 100 + 150
                  + income / 5 + np.random.normal(size=n_users))
        time_spent = coupon * 10 + amount / 10
        data = pd.DataFrame({
            'gender': gender,
            'coupon': coupon,
            'amount': amount,
            'income': income,
            'time_spent': time_spent,
        })
    else:
        gender = np.random.randint(0, 2, size=n_users)
        coupon = gender * 20 + 150 + np.random.normal(scale=5, size=n_users)
        if treatment_style == 'binary':
            coupon = (coupon > 150).astype(int)
        amount = (coupon * 30 + gender * 100
                  + 150 + np.random.normal(size=n_users))
        time_spent = coupon * 100 + amount / 10
        data = pd.DataFrame({
            'gender': gender,
            'coupon': coupon,
            'amount': amount,
            'time_spent': time_spent,
        })
    return data
def meaningless_discrete_dataset(num, confounder_n,
                                 treatment_effct=None,
                                 prob=None,
                                 w_var=0.5,
                                 eps=1e-4,
                                 coef_range=5e4,
                                 data_frame=True,
                                 random_seed=2022):
    """Generate a discrete-treatment dataset with confounded assignment.

    Treatment labels are drawn with probabilities `prob`, perturbed by the
    confounders and clipped back onto the valid label range; the outcome is
    linear in the confounders plus `treatment_effct[x] * x`.

    Args:
        num (int): Number of samples.
        confounder_n (int): Number of confounders.
        treatment_effct (list): Effect size per treatment level.
        prob (list): Assignment probability per treatment level.
        w_var (float): Std. deviation of each confounder.
        eps (float): Noise level.
        coef_range (float): `coef_range * eps` bounds the confounder coefficients.
        data_frame (bool): Return a DataFrame if True, raw arrays otherwise.

    Returns:
        (data, coef) if `data_frame`, else (x, w, y, coef).
    """
    np.random.seed(random_seed)
    # Draw how many samples fall into each treatment level, lay the labels
    # out in order and shuffle them
    samples = np.random.multinomial(num, prob)
    x = [level for level, count in enumerate(samples) for _ in range(count)]
    np.random.shuffle(x)
    # Confounders perturb the treatment labels
    w = [
        np.random.normal(0, w_var, size=(num,)) for _ in range(confounder_n)
    ]
    for w_ in w:
        x = x + w_
    # Round back to integer labels and clamp onto the valid range
    x = np.round(x).astype(int)
    x = np.clip(x, 0, len(prob) - 1)
    # Outcome: noise + per-level treatment effect + linear confounder terms
    coef = np.random.randint(int(coef_range*eps), size=(confounder_n,))
    y = np.random.normal(eps, size=(num,))
    for i, xi in enumerate(x):
        y[i] += treatment_effct[xi] * xi
    for coef_, w_ in zip(coef, w):
        y += coef_ * w_
    if data_frame:
        data_dict = {'treatment': x}
        for i, w_ in enumerate(w):
            data_dict[f'w_{i}'] = w_
        data_dict['outcome'] = y
        data = pd.DataFrame(data_dict)
        return data, coef
    return (x, w, y, coef)
| [
"numpy.random.seed",
"numpy.random.random_sample",
"sklearn.model_selection.train_test_split",
"numpy.random.multinomial",
"numpy.random.randint",
"numpy.arange",
"numpy.exp",
"numpy.random.normal",
"numpy.round",
"pandas.DataFrame",
"numpy.random.randn",
"numpy.linspace",
"numpy.random.shuf... | [((178, 210), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'n'}), '(-1, 1, size=n)\n', (195, 210), True, 'import numpy as np\n'), ((247, 279), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'n'}), '(-1, 1, size=n)\n', (264, 279), True, 'import numpy as np\n'), ((308, 324), 'numpy.exp', 'np.exp', (['(2 * x[0])'], {}), '(2 * x[0])\n', (314, 324), True, 'import numpy as np\n'), ((350, 366), 'numpy.log', 'np.log', (['(1 + x[0])'], {}), '(1 + x[0])\n', (356, 366), True, 'import numpy as np\n'), ((960, 983), 'pandas.DataFrame', 'pd.DataFrame', (['data_dict'], {}), '(data_dict)\n', (972, 983), True, 'import pandas as pd\n'), ((1001, 1023), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {}), '(data)\n', (1017, 1023), False, 'from sklearn.model_selection import train_test_split\n'), ((1149, 1176), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1163, 1176), True, 'import numpy as np\n'), ((1311, 1353), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'support_size'}), '(0, 1, size=support_size)\n', (1328, 1353), True, 'import numpy as np\n'), ((1394, 1436), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'support_size'}), '(0, 1, size=support_size)\n', (1411, 1436), True, 'import numpy as np\n'), ((1446, 1483), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n, n_w)'}), '(0, 1, size=(n, n_w))\n', (1462, 1483), True, 'import numpy as np\n'), ((1492, 1530), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(n, n_v)'}), '(0, 1, size=(n, n_v))\n', (1509, 1530), True, 'import numpy as np\n'), ((1819, 1854), 'numpy.concatenate', 'np.concatenate', (['(T, T ** 2)'], {'axis': '(1)'}), '((T, T ** 2), axis=1)\n', (1833, 1854), True, 'import numpy as np\n'), ((2058, 2098), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(100, n_v)'}), 
'(0, 1, size=(100, n_v))\n', (2075, 2098), True, 'import numpy as np\n'), ((2118, 2140), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (2129, 2140), True, 'import numpy as np\n'), ((2253, 2280), 'pandas.DataFrame', 'pd.DataFrame', (['data_test_dic'], {}), '(data_test_dic)\n', (2265, 2280), True, 'import pandas as pd\n'), ((2589, 2616), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (2603, 2616), True, 'import numpy as np\n'), ((2771, 2813), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'support_size'}), '(0, 1, size=support_size)\n', (2788, 2813), True, 'import numpy as np\n'), ((2878, 2920), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'support_size'}), '(0, 1, size=support_size)\n', (2895, 2920), True, 'import numpy as np\n'), ((2991, 3037), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n, confounder_n)'}), '(0, 1, size=(n, confounder_n))\n', (3007, 3037), True, 'import numpy as np\n'), ((3046, 3092), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': '(n, covariate_n)'}), '(0, 1, size=(n, covariate_n))\n', (3063, 3092), True, 'import numpy as np\n'), ((3917, 3944), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (3931, 3944), True, 'import numpy as np\n'), ((4086, 4128), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'support_size'}), '(0, 1, size=support_size)\n', (4103, 4128), True, 'import numpy as np\n'), ((4169, 4211), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'support_size'}), '(0, 1, size=support_size)\n', (4186, 4211), True, 'import numpy as np\n'), ((4220, 4268), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(num, confounder_n)'}), '(0, 1, size=(num, confounder_n))\n', (4236, 4268), True, 'import numpy as np\n'), ((4277, 4325), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 
'(1)'], {'size': '(num, covariate_n)'}), '(0, 1, size=(num, covariate_n))\n', (4294, 4325), True, 'import numpy as np\n'), ((6368, 6395), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (6382, 6395), True, 'import numpy as np\n'), ((6671, 6706), 'numpy.random.rand', 'np.random.rand', (['x_num', 'confounder_n'], {}), '(x_num, confounder_n)\n', (6685, 6706), True, 'import numpy as np\n'), ((6948, 6980), 'numpy.random.randn', 'np.random.randn', (['(1)', 'confounder_n'], {}), '(1, confounder_n)\n', (6963, 6980), True, 'import numpy as np\n'), ((7092, 7130), 'numpy.concatenate', 'np.concatenate', (['(x_one_hot, w)'], {'axis': '(1)'}), '((x_one_hot, w), axis=1)\n', (7106, 7130), True, 'import numpy as np\n'), ((9366, 9393), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (9380, 9393), True, 'import numpy as np\n'), ((9408, 9440), 'numpy.random.multinomial', 'np.random.multinomial', (['num', 'prob'], {}), '(num, prob)\n', (9429, 9440), True, 'import numpy as np\n'), ((9700, 9720), 'numpy.random.shuffle', 'np.random.shuffle', (['x'], {}), '(x)\n', (9717, 9720), True, 'import numpy as np\n'), ((10178, 10212), 'numpy.random.normal', 'np.random.normal', (['eps'], {'size': '(num,)'}), '(eps, size=(num,))\n', (10194, 10212), True, 'import numpy as np\n'), ((1241, 1255), 'numpy.arange', 'np.arange', (['n_w'], {}), '(n_w)\n', (1250, 1255), True, 'import numpy as np\n'), ((1643, 1675), 'numpy.dot', 'np.dot', (['W[:, support_T]', 'coefs_T'], {}), '(W[:, support_T], coefs_T)\n', (1649, 1675), True, 'import numpy as np\n'), ((3215, 3247), 'numpy.dot', 'np.dot', (['W[:, support_T]', 'coefs_T'], {}), '(W[:, support_T], coefs_T)\n', (3221, 3247), True, 'import numpy as np\n'), ((4008, 4031), 'numpy.arange', 'np.arange', (['confounder_n'], {}), '(confounder_n)\n', (4017, 4031), True, 'import numpy as np\n'), ((4378, 4410), 'numpy.dot', 'np.dot', (['w[:, support_t]', 'coefs_t'], {}), '(w[:, support_t], coefs_t)\n', (4384, 
4410), True, 'import numpy as np\n'), ((6733, 6776), 'numpy.random.normal', 'np.random.normal', (['(0)', 'eps'], {'size': '(num, x_num)'}), '(0, eps, size=(num, x_num))\n', (6749, 6776), True, 'import numpy as np\n'), ((6854, 6867), 'numpy.eye', 'np.eye', (['x_num'], {}), '(x_num)\n', (6860, 6867), True, 'import numpy as np\n'), ((7158, 7197), 'numpy.random.normal', 'np.random.normal', (['(0)', 'eps'], {'size': '(num, 1)'}), '(0, eps, size=(num, 1))\n', (7174, 7197), True, 'import numpy as np\n'), ((7498, 7521), 'pandas.DataFrame', 'pd.DataFrame', (['data_dict'], {}), '(data_dict)\n', (7510, 7521), True, 'import pandas as pd\n'), ((7763, 7808), 'numpy.random.normal', 'np.random.normal', (['(500)'], {'scale': '(15)', 'size': 'n_users'}), '(500, scale=15, size=n_users)\n', (7779, 7808), True, 'import numpy as np\n'), ((7826, 7863), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'n_users'}), '(0, 2, size=n_users)\n', (7843, 7863), True, 'import numpy as np\n'), ((8229, 8345), 'pandas.DataFrame', 'pd.DataFrame', (["{'gender': gender, 'coupon': coupon, 'amount': amount, 'income': income,\n 'time_spent': time_spent}"], {}), "({'gender': gender, 'coupon': coupon, 'amount': amount,\n 'income': income, 'time_spent': time_spent})\n", (8241, 8345), True, 'import pandas as pd\n'), ((8440, 8477), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'n_users'}), '(0, 2, size=n_users)\n', (8457, 8477), True, 'import numpy as np\n'), ((8802, 8900), 'pandas.DataFrame', 'pd.DataFrame', (["{'gender': gender, 'coupon': coupon, 'amount': amount, 'time_spent': time_spent\n }"], {}), "({'gender': gender, 'coupon': coupon, 'amount': amount,\n 'time_spent': time_spent})\n", (8814, 8900), True, 'import pandas as pd\n'), ((9773, 9812), 'numpy.random.normal', 'np.random.normal', (['(0)', 'w_var'], {'size': '(num,)'}), '(0, w_var, size=(num,))\n', (9789, 9812), True, 'import numpy as np\n'), ((10537, 10560), 'pandas.DataFrame', 'pd.DataFrame', 
(['data_dict'], {}), '(data_dict)\n', (10549, 10560), True, 'import pandas as pd\n'), ((1733, 1765), 'numpy.dot', 'np.dot', (['W[:, support_Y]', 'coefs_Y'], {}), '(W[:, support_Y], coefs_Y)\n', (1739, 1765), True, 'import numpy as np\n'), ((3287, 3304), 'numpy.exp', 'np.exp', (['(-log_odds)'], {}), '(-log_odds)\n', (3293, 3304), True, 'import numpy as np\n'), ((3324, 3348), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p'], {}), '(1, p)\n', (3342, 3348), True, 'import numpy as np\n'), ((3412, 3444), 'numpy.dot', 'np.dot', (['W[:, support_Y]', 'coefs_Y'], {}), '(W[:, support_Y], coefs_Y)\n', (3418, 3444), True, 'import numpy as np\n'), ((4446, 4478), 'numpy.dot', 'np.dot', (['w[:, support_y]', 'coefs_y'], {}), '(w[:, support_y], coefs_y)\n', (4452, 4478), True, 'import numpy as np\n'), ((7929, 7968), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(5)', 'size': 'n_users'}), '(scale=5, size=n_users)\n', (7945, 7968), True, 'import numpy as np\n'), ((8137, 8167), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n_users'}), '(size=n_users)\n', (8153, 8167), True, 'import numpy as np\n'), ((8515, 8554), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(5)', 'size': 'n_users'}), '(scale=5, size=n_users)\n', (8531, 8554), True, 'import numpy as np\n'), ((8709, 8739), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n_users'}), '(size=n_users)\n', (8725, 8739), True, 'import numpy as np\n'), ((9909, 9920), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (9917, 9920), True, 'import numpy as np\n'), ((4546, 4567), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.01)'], {}), '(0, 1, 0.01)\n', (4555, 4567), True, 'import numpy as np\n'), ((6532, 6557), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (6555, 6557), True, 'import numpy as np\n'), ((7019, 7044), 'numpy.array', 'np.array', (['treatment_effct'], {}), '(treatment_effct)\n', (7027, 7044), True, 'import numpy as np\n')] |
from __future__ import division
import numpy as np
import logging
import os
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
# viridis = cm.get_cmap('viridis', lut=10)
# magma = cm.get_cmap('magma')
# hot = cm.get_cmap('hot')
# seismic = cm.get_cmap('seismic')
#
# viridis_list = [[53, 42, 135],
# [15, 92, 221],
# [18, 125, 216],
# [7, 156, 207],
# [21, 177, 180],
# [89, 189, 140],
# [165, 190, 107],
# [225, 185, 82],
# [252, 206, 46],
# [249, 251, 14],
# [255, 255, 0]]
# viridis_list = np.array(viridis_list, dtype=np.float32)
#
# cmp_list = []
# for i in np.arange(0., 1.1, 0.1):
# cmp_list.append(cm.get_cmap('viridis', lut=10)(i)[:3])
def get_rgb_from_ratio(cmp_list, ratio):
    """Linearly interpolate an RGB value from an 11-entry colormap list.

    ``ratio`` is scaled onto ``[0, 10]``; out-of-range ratios clamp to the
    first/last colormap entry.

    Note: clamped results are returned directly (1-D row of ``cmp_list``),
    while interpolated results are wrapped as a ``(1, 3)`` float32 array —
    both broadcast correctly into a ``colors[i, :]`` assignment.
    """
    position = 10. * ratio
    # Clamp out-of-range ratios to the colormap endpoints.
    if position <= 0.:
        return cmp_list[0]
    if position >= 10.:
        return cmp_list[-1]
    lower = int(np.floor(position))
    fraction = position - np.floor(position)
    lo_rgb = cmp_list[lower]
    hi_rgb = cmp_list[lower + 1]
    blended = lo_rgb + fraction * (hi_rgb - lo_rgb)
    return np.array([blended], dtype=np.float32)
def get_rgbs_from_values(values, cmp='viridis'):
    """Map a 1-D array of scalar values to RGB colors scaled to [0, 255].

    Values are robustly rescaled using their 2nd/98th percentiles and then
    looked up in a 10-level colormap by linear interpolation.

    Args:
        values: 1-D array of per-point scalar values.
        cmp: matplotlib colormap name; falls back to 'viridis' on error.

    Returns:
        ``(len(values), 3)`` float32 array of RGB colors in [0, 255].
    """
    try:
        color_map_fn = cm.get_cmap(cmp, lut=10)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine colormap lookup errors belong here.
        logging.warning(
            "Error occur while try to use '{}' cmp, the default cmp 'viridis' was applied instead.".format(cmp))
        color_map_fn = cm.get_cmap('viridis', lut=10)
    # Sample the colormap at 11 roughly evenly spaced positions in [0, 1].
    cmp_list = []
    for i in np.arange(0., 1.1, 0.1):
        cmp_list.append(list(color_map_fn(i)[:3]))
    cmp_list = np.array(cmp_list)
    values = deepcopy(values)  # avoid mutating the caller's array below
    assert len(values.shape) == 1, \
        "The point values should be in 1-D, but actually got: {}-D".format(len(values.shape))
    if np.std(values) < 1e-3:
        # Near-constant input: percentile rescaling would be meaningless.
        logging.warning('Input values have low variance (less than 1e-3), please make sure you input the correct data.')
        values = np.zeros_like(values)
    else:
        # Percentile-based normalization is robust to outliers. Values may
        # still land slightly outside [0, 1]; get_rgb_from_ratio clamps them.
        values -= np.percentile(values, 2)
        values /= np.percentile(values, 98)
    colors = np.zeros((len(values), 3), dtype=np.float32)
    for i in range(len(values)):
        colors[i, :] = get_rgb_from_ratio(cmp_list, values[i])
    return colors * 255
def create_dir(path, clean=False):
    """Create ``path`` (including intermediate directories).

    If the path already exists: with ``clean=False`` a warning is logged and
    nothing is changed; with ``clean=True`` the directory is removed via
    ``rm -r`` and recreated empty.
    """
    try:
        os.makedirs(path)
        return
    except OSError:
        # Path (or a component of it) already exists — fall through.
        pass
    if clean:
        os.system('rm -r {}'.format(path))
        os.makedirs(path)
        logging.warning("Dir {} already exists and has been cleaned.".format(path))
    else:
        logging.warning("Dir {} already exists, operation skipped.".format(path))
def coors_normalize(coors):
    """Rescale point coordinates by a robust per-axis magnitude.

    The scale is the maximum over axes of the 90th percentile of the
    absolute coordinates; a zero scale (degenerate input) is replaced by 1
    with a warning.

    Returns:
        (scaled_coors, scale)
    """
    per_axis = np.percentile(np.abs(coors), 90, axis=0)
    scale = np.max(per_axis)
    if scale == 0:
        logging.warning("Input points has very small coordinates, please make sure you input the correct data.")
        scale = 1.
    return coors / scale, scale
| [
"copy.deepcopy",
"numpy.zeros_like",
"numpy.abs",
"os.makedirs",
"matplotlib.cm.get_cmap",
"numpy.std",
"logging.warning",
"numpy.floor",
"numpy.percentile",
"numpy.array",
"numpy.arange"
] | [((1426, 1463), 'numpy.array', 'np.array', (['[cmp_rgb]'], {'dtype': 'np.float32'}), '([cmp_rgb], dtype=np.float32)\n', (1434, 1463), True, 'import numpy as np\n'), ((1808, 1832), 'numpy.arange', 'np.arange', (['(0.0)', '(1.1)', '(0.1)'], {}), '(0.0, 1.1, 0.1)\n', (1817, 1832), True, 'import numpy as np\n'), ((1899, 1917), 'numpy.array', 'np.array', (['cmp_list'], {}), '(cmp_list)\n', (1907, 1917), True, 'import numpy as np\n'), ((1931, 1947), 'copy.deepcopy', 'deepcopy', (['values'], {}), '(values)\n', (1939, 1947), False, 'from copy import deepcopy\n'), ((1110, 1127), 'numpy.floor', 'np.floor', (['cmp_idx'], {}), '(cmp_idx)\n', (1118, 1127), True, 'import numpy as np\n'), ((1336, 1353), 'numpy.floor', 'np.floor', (['cmp_idx'], {}), '(cmp_idx)\n', (1344, 1353), True, 'import numpy as np\n'), ((1548, 1572), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['cmp'], {'lut': '(10)'}), '(cmp, lut=10)\n', (1559, 1572), False, 'from matplotlib import cm\n'), ((2086, 2100), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (2092, 2100), True, 'import numpy as np\n'), ((2117, 2239), 'logging.warning', 'logging.warning', (['"""Input values have low variance (less than 1e-3), please make sure you input the correct data."""'], {}), "(\n 'Input values have low variance (less than 1e-3), please make sure you input the correct data.'\n )\n", (2132, 2239), False, 'import logging\n'), ((2247, 2268), 'numpy.zeros_like', 'np.zeros_like', (['values'], {}), '(values)\n', (2260, 2268), True, 'import numpy as np\n'), ((2297, 2321), 'numpy.percentile', 'np.percentile', (['values', '(2)'], {}), '(values, 2)\n', (2310, 2321), True, 'import numpy as np\n'), ((2340, 2365), 'numpy.percentile', 'np.percentile', (['values', '(98)'], {}), '(values, 98)\n', (2353, 2365), True, 'import numpy as np\n'), ((2598, 2615), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2609, 2615), False, 'import os\n'), ((3057, 3171), 'logging.warning', 'logging.warning', (['"""Input points has very small 
coordinates, please make sure you input the correct data."""'], {}), "(\n 'Input points has very small coordinates, please make sure you input the correct data.'\n )\n", (3072, 3171), False, 'import logging\n'), ((1746, 1776), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""viridis"""'], {'lut': '(10)'}), "('viridis', lut=10)\n", (1757, 1776), False, 'from matplotlib import cm\n'), ((2985, 2998), 'numpy.abs', 'np.abs', (['coors'], {}), '(coors)\n', (2991, 2998), True, 'import numpy as np\n'), ((2817, 2834), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2828, 2834), False, 'import os\n')] |
import os
import warnings
from math import ceil
import numpy as np
import cutde.backend as backend
# Absolute directory of this module; the kernel template files
# ("pairs.cu", "matrix.cu", "free.cu", "blocks.cu") are loaded from here.
source_dir = os.path.dirname(os.path.realpath(__file__))

# (kernel-name suffix, output vector dimension) descriptors passed as the
# `fnc` argument of the call_clu* entry points below. Displacement kernels
# emit 3 components per observation point, strain kernels emit 6.
# NOTE(review): "fs"/"hs" presumably denote full-space/half-space kernel
# variants — confirm against the kernel sources.
DISP_FS = ("disp_fs", 3)
STRAIN_FS = ("strain_fs", 6)
DISP_HS = ("disp_hs", 3)
STRAIN_HS = ("strain_hs", 6)
class Placeholder:
    """Sentinel type standing in for an absent input array.

    `solve_types` and `check_inputs` skip validation/conversion for
    arguments that are instances of this class; `call_clu_matrix` and
    `call_clu_block` pass the shared `placeholder` instance where no
    slips array applies.
    """

    pass


# Shared sentinel instance used throughout this module.
placeholder = Placeholder()
def solve_types(obs_pts, tris, slips):
    """Pick a shared float dtype and normalize the three input arrays.

    The dtype of the first non-placeholder array selects the shared float
    type (int32 -> float32; int64 and float64 -> float64; float32 stays
    float32). Each array is then converted to that dtype and to
    C-contiguous layout if needed, warning about each potentially
    expensive conversion. ``Placeholder`` inputs pass through untouched.

    Returns:
        (float_type, [obs_pts, tris, slips]) with arrays converted.

    Raises:
        ValueError: if an array's dtype is not one of the four supported
            integer/float types.
    """
    # Maps an accepted input dtype to the float dtype used for computation.
    type_map = {
        np.int32: np.float32,
        np.int64: np.float64,
        np.float32: np.float32,
        np.float64: np.float64,
    }
    float_type = None
    out_arrs = []
    for name, arr in [("obs_pts", obs_pts), ("tris", tris), ("slips", slips)]:
        if isinstance(arr, Placeholder):
            # Sentinel: no array supplied for this slot; pass it through.
            out_arrs.append(arr)
            continue
        dtype = arr.dtype.type
        if dtype not in type_map:
            raise ValueError(
                f"The {name} input array has type {arr.dtype} but must have a float or"
                " integer dtype."
            )
        if float_type is None:
            # First real array seen decides the shared float type.
            float_type = type_map[dtype]
            # If we're using OpenCL, we need to check if float64 is allowed.
            # If not, convert to float32.
            if backend.which_backend == "opencl":
                import cutde.opencl
                cutde.opencl.ensure_initialized()
                extensions = (
                    cutde.opencl.gpu_ctx.devices[0].extensions.strip().split(" ")
                )
                if "cl_khr_fp64" not in extensions and float_type is np.float64:
                    warnings.warn(
                        "The OpenCL implementation being used does not support "
                        "float64. This will require converting arrays to float32."
                    )
                    float_type = np.float32
        out_arr = arr
        if dtype != float_type:
            warnings.warn(
                f"The {name} input array has type {out_arr.dtype} but needs "
                "to be converted"
                f" to dtype {np.dtype(float_type)}. Converting {name} to "
                f"{np.dtype(float_type)} may be expensive."
            )
            out_arr = out_arr.astype(float_type)
        if out_arr.flags.f_contiguous:
            warnings.warn(
                f"The {name} input array has Fortran ordering. "
                "Converting to C ordering. This may be expensive."
            )
            out_arr = np.ascontiguousarray(out_arr)
        out_arrs.append(out_arr)
    return float_type, out_arrs
def check_inputs(obs_pts, tris, slips):
    """Validate the array shapes shared by every TDE entry point.

    Expects ``obs_pts`` with shape ``(n_obs, 3)``, ``tris`` with shape
    ``(n_tris, 3, 3)`` and, unless ``slips`` is a ``Placeholder``, slips
    with shape ``(n_tris, 3)``.

    Raises:
        ValueError: describing the first shape constraint violated.
    """
    if obs_pts.shape[1] != 3:
        raise ValueError(
            "The second dimension of the obs_pts array must be 3 because the"
            " observation points should be locations in three-dimensional space."
        )
    if tris.shape[1] != 3:
        raise ValueError(
            "The second dimension of the tris array must be 3 because there must"
            " be three vertices per triangle."
        )
    if tris.shape[2] != 3:
        raise ValueError(
            "The third dimension of the tris array must be 3 because the triangle"
            " vertices should be locations in three-dimensional space."
        )
    # Slip checks only apply when a real array (not the sentinel) was given.
    slips_given = not isinstance(slips, Placeholder)
    if slips_given and slips.shape[0] != tris.shape[0]:
        raise ValueError(
            "The number of input slip vectors must be equal to the number of"
            " input triangles."
        )
    if slips_given and slips.shape[1] != 3:
        raise ValueError(
            "The second dimension of the slips array must be 3 because each row"
            " should be a vector in the TDE coordinate system (strike-slip,"
            " dip-slip, tensile-slip)."
        )
def call_clu(obs_pts, tris, slips, nu, fnc):
    """Evaluate TDEs pair-wise: observation point i against triangle i.

    Args:
        obs_pts: (n, 3) observation point coordinates.
        tris: (n, 3, 3) triangle vertex coordinates.
        slips: (n, 3) slip vectors in TDE coordinates.
        nu: Poisson ratio.
        fnc: one of the (kernel suffix, output dim) module constants.

    Returns:
        (n, vec_dim) array of kernel outputs, one row per pair.
    """
    fnc_name, vec_dim = fnc
    if tris.shape[0] != obs_pts.shape[0]:
        raise ValueError("There must be one input observation point per triangle.")
    check_inputs(obs_pts, tris, slips)
    float_type, (obs_pts, tris, slips) = solve_types(obs_pts, tris, slips)

    n_pairs = obs_pts.shape[0]
    team_size = backend.max_block_size(16)
    grid = (ceil(n_pairs / team_size), 1, 1)

    tmpl_args = dict(
        block_size=team_size, float_type=backend.np_to_c_type(float_type)
    )
    module = backend.load_module("pairs.cu", tmpl_args=tmpl_args, tmpl_dir=source_dir)

    result_buf = backend.empty(n_pairs * vec_dim, float_type)
    kernel = getattr(module, "pairs_" + fnc_name)
    kernel(
        result_buf,
        np.int32(n_pairs),
        backend.to(obs_pts, float_type),
        backend.to(tris, float_type),
        backend.to(slips, float_type),
        float_type(nu),
        grid,
        (team_size, 1, 1),
    )
    return backend.get(result_buf).reshape((n_pairs, vec_dim))
def call_clu_matrix(obs_pts, tris, nu, fnc):
    """Build the dense influence matrix: every obs point vs. every triangle.

    No slips are supplied (the ``placeholder`` sentinel is used); instead
    the output carries one entry per slip component.

    Returns:
        (n_obs, vec_dim, n_src, 3) array of kernel outputs.
    """
    fnc_name, vec_dim = fnc
    check_inputs(obs_pts, tris, placeholder)
    float_type, (obs_pts, tris, _) = solve_types(obs_pts, tris, placeholder)

    n_obs = obs_pts.shape[0]
    n_src = tris.shape[0]
    team_size = backend.max_block_size(16)
    # 2-D launch: one axis over observation points, one over sources.
    grid = (ceil(n_obs / team_size), ceil(n_src / team_size), 1)

    module = backend.load_module(
        "matrix.cu",
        tmpl_args=dict(float_type=backend.np_to_c_type(float_type)),
        tmpl_dir=source_dir,
    )
    result_buf = backend.empty(n_obs * vec_dim * n_src * 3, float_type)
    kernel = getattr(module, "matrix_" + fnc_name)
    kernel(
        result_buf,
        np.int32(n_obs),
        np.int32(n_src),
        backend.to(obs_pts, float_type),
        backend.to(tris, float_type),
        float_type(nu),
        grid,
        (team_size, team_size, 1),
    )
    return backend.get(result_buf).reshape((n_obs, vec_dim, n_src, 3))
def call_clu_free(obs_pts, tris, slips, nu, fnc):
    """Sum the effect of every source triangle at every observation point.

    Sources are processed in chunks of 64 and the per-chunk device results
    are accumulated on the host into ``out``.

    Returns:
        (n_obs, vec_dim) array: total output at each observation point.

    NOTE(review): ``gpu_results`` is zeroed once and reused across chunk
    launches while ``out += ...`` accumulates every launch's contents —
    this assumes the "free_" kernel overwrites (rather than accumulates
    into) ``gpu_results``; confirm against free.cu.
    """
    fnc_name, vec_dim = fnc
    check_inputs(obs_pts, tris, slips)
    float_type, (obs_pts, tris, slips) = solve_types(obs_pts, tris, slips)
    n_obs = obs_pts.shape[0]
    n_src = tris.shape[0]
    block_size = backend.max_block_size(256)
    gpu_obs_pts = backend.to(obs_pts, float_type)
    gpu_tris = backend.to(tris, float_type)
    gpu_slips = backend.to(slips, float_type)
    gpu_results = backend.zeros(n_obs * vec_dim, float_type)
    # One thread team axis over observation points only; each launch loops
    # over the sources in [chunk_start, chunk_end).
    n_obs_blocks = int(np.ceil(n_obs / block_size))
    gpu_config = dict(float_type=backend.np_to_c_type(float_type))
    module = backend.load_module("free.cu", tmpl_args=gpu_config, tmpl_dir=source_dir)
    # Split up the sources into chunks so that we don't completely overwhelm a
    # single GPU machine and cause the screen to lock up.
    default_chunk_size = 64
    n_chunks = int(ceil(n_src / default_chunk_size))
    out = np.zeros((n_obs, vec_dim), dtype=float_type)
    for i in range(n_chunks):
        chunk_start = i * default_chunk_size
        # The last chunk may be shorter than default_chunk_size.
        chunk_size = min(n_src - chunk_start, default_chunk_size)
        chunk_end = chunk_start + chunk_size
        getattr(module, "free_" + fnc_name)(
            gpu_results,
            np.int32(n_obs),
            np.int32(n_src),
            np.int32(chunk_start),
            np.int32(chunk_end),
            gpu_obs_pts,
            gpu_tris,
            gpu_slips,
            float_type(nu),
            (n_obs_blocks, 1, 1),
            (block_size, 1, 1),
        )
        out += backend.get(gpu_results).reshape((n_obs, vec_dim))
    return out
def process_block_inputs(obs_start, obs_end, src_start, src_end):
    """Validate and normalize the four block-index arrays.

    Each input is converted to a fresh C-contiguous ``np.int32`` array.
    All four must have the same length and an integer (int32/int64) dtype.

    Returns:
        [obs_start, obs_end, src_start, src_end] as int32 arrays.

    Raises:
        ValueError: on a length mismatch or a non-integer dtype.
    """
    # Hoisted out of the loop: the reference length is loop-invariant
    # (the original reconverted obs_start on every iteration).
    n_blocks = np.array(obs_start).shape[0]
    out_arrs = []
    for name, a in [
        ("obs_start", obs_start),
        ("obs_end", obs_end),
        ("src_start", src_start),
        ("src_end", src_end),
    ]:
        a = np.array(a)
        if a.shape[0] != n_blocks:
            raise ValueError(f"The length of {name} must match obs_start.")
        if not (a.dtype.type is np.int32 or a.dtype.type is np.int64):
            raise ValueError(f"The {name} array must have integer type.")
        # Don't bother warning for these conversions since the cost of
        # converting a single value per block is tiny. (The original guarded
        # this on f_contiguous/dtype, but 1-D contiguous arrays are always
        # Fortran-contiguous, so the conversion ran unconditionally anyway.)
        out_arrs.append(np.ascontiguousarray(a, dtype=np.int32))
    return out_arrs
def call_clu_block(obs_pts, tris, obs_start, obs_end, src_start, src_end, nu, fnc):
    """Evaluate rectangular sub-blocks of the influence matrix.

    Block ``b`` covers observation rows ``obs_start[b]:obs_end[b]`` and
    source columns ``src_start[b]:src_end[b]``. All blocks are computed in
    a single kernel launch and packed back-to-back into one flat array.

    Returns:
        (flat_results, block_start): ``flat_results`` holds the
        concatenated block entries and ``block_start[b]`` is block ``b``'s
        offset within it; the extra final element equals the total size.
    """
    fnc_name, vec_dim = fnc
    # The `placeholder` sentinel stands in for slips: this entry point
    # produces operator entries (one per slip component) rather than
    # applying a given slip vector.
    check_inputs(obs_pts, tris, placeholder)
    float_type, (obs_pts, tris, _) = solve_types(obs_pts, tris, placeholder)
    obs_start, obs_end, src_start, src_end = process_block_inputs(
        obs_start, obs_end, src_start, src_end
    )
    # Floats per block: vec_dim outputs x 3 slip components per (obs, src)
    # pair inside the block's rectangle.
    block_sizes = vec_dim * 3 * (obs_end - obs_start) * (src_end - src_start)
    block_end = np.cumsum(block_sizes)
    # block_start gets one extra trailing entry so block b occupies
    # flat_results[block_start[b]:block_start[b + 1]].
    block_start = np.empty(block_end.shape[0] + 1, dtype=block_end.dtype)
    block_start[:-1] = block_end - block_sizes
    block_start[-1] = block_end[-1]
    n_blocks = obs_end.shape[0]
    team_size = backend.max_block_size(16)
    gpu_config = dict(float_type=backend.np_to_c_type(float_type))
    module = backend.load_module("blocks.cu", tmpl_args=gpu_config, tmpl_dir=source_dir)
    gpu_results = backend.zeros(block_end[-1], float_type)
    gpu_obs_pts = backend.to(obs_pts, float_type)
    gpu_tris = backend.to(tris, float_type)
    gpu_obs_start = backend.to(obs_start, np.int32)
    gpu_obs_end = backend.to(obs_end, np.int32)
    gpu_src_start = backend.to(src_start, np.int32)
    gpu_src_end = backend.to(src_end, np.int32)
    gpu_block_start = backend.to(block_start, np.int32)
    # Launch one team of team_size threads per block.
    getattr(module, "blocks_" + fnc_name)(
        gpu_results,
        gpu_obs_pts,
        gpu_tris,
        gpu_obs_start,
        gpu_obs_end,
        gpu_src_start,
        gpu_src_end,
        gpu_block_start,
        float_type(nu),
        (n_blocks, 1, 1),
        (team_size, 1, 1),
    )
    return backend.get(gpu_results), block_start
| [
"numpy.ceil",
"cutde.backend.get",
"math.ceil",
"cutde.backend.load_module",
"os.path.realpath",
"cutde.backend.max_block_size",
"cutde.backend.to",
"numpy.zeros",
"numpy.empty",
"numpy.ascontiguousarray",
"numpy.dtype",
"numpy.cumsum",
"cutde.backend.np_to_c_type",
"cutde.backend.zeros",
... | [((131, 157), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (147, 157), False, 'import os\n'), ((4062, 4088), 'cutde.backend.max_block_size', 'backend.max_block_size', (['(16)'], {}), '(16)\n', (4084, 4088), True, 'import cutde.backend as backend\n'), ((4250, 4324), 'cutde.backend.load_module', 'backend.load_module', (['"""pairs.cu"""'], {'tmpl_args': 'gpu_config', 'tmpl_dir': 'source_dir'}), "('pairs.cu', tmpl_args=gpu_config, tmpl_dir=source_dir)\n", (4269, 4324), True, 'import cutde.backend as backend\n'), ((4344, 4382), 'cutde.backend.empty', 'backend.empty', (['(n * vec_dim)', 'float_type'], {}), '(n * vec_dim, float_type)\n', (4357, 4382), True, 'import cutde.backend as backend\n'), ((4401, 4432), 'cutde.backend.to', 'backend.to', (['obs_pts', 'float_type'], {}), '(obs_pts, float_type)\n', (4411, 4432), True, 'import cutde.backend as backend\n'), ((4448, 4476), 'cutde.backend.to', 'backend.to', (['tris', 'float_type'], {}), '(tris, float_type)\n', (4458, 4476), True, 'import cutde.backend as backend\n'), ((4493, 4522), 'cutde.backend.to', 'backend.to', (['slips', 'float_type'], {}), '(slips, float_type)\n', (4503, 4522), True, 'import cutde.backend as backend\n'), ((5092, 5118), 'cutde.backend.max_block_size', 'backend.max_block_size', (['(16)'], {}), '(16)\n', (5114, 5118), True, 'import cutde.backend as backend\n'), ((5303, 5378), 'cutde.backend.load_module', 'backend.load_module', (['"""matrix.cu"""'], {'tmpl_args': 'gpu_config', 'tmpl_dir': 'source_dir'}), "('matrix.cu', tmpl_args=gpu_config, tmpl_dir=source_dir)\n", (5322, 5378), True, 'import cutde.backend as backend\n'), ((5398, 5452), 'cutde.backend.empty', 'backend.empty', (['(n_obs * vec_dim * n_src * 3)', 'float_type'], {}), '(n_obs * vec_dim * n_src * 3, float_type)\n', (5411, 5452), True, 'import cutde.backend as backend\n'), ((5471, 5502), 'cutde.backend.to', 'backend.to', (['obs_pts', 'float_type'], {}), '(obs_pts, float_type)\n', (5481, 5502), True, 'import 
cutde.backend as backend\n'), ((5518, 5546), 'cutde.backend.to', 'backend.to', (['tris', 'float_type'], {}), '(tris, float_type)\n', (5528, 5546), True, 'import cutde.backend as backend\n'), ((6162, 6189), 'cutde.backend.max_block_size', 'backend.max_block_size', (['(256)'], {}), '(256)\n', (6184, 6189), True, 'import cutde.backend as backend\n'), ((6209, 6240), 'cutde.backend.to', 'backend.to', (['obs_pts', 'float_type'], {}), '(obs_pts, float_type)\n', (6219, 6240), True, 'import cutde.backend as backend\n'), ((6256, 6284), 'cutde.backend.to', 'backend.to', (['tris', 'float_type'], {}), '(tris, float_type)\n', (6266, 6284), True, 'import cutde.backend as backend\n'), ((6301, 6330), 'cutde.backend.to', 'backend.to', (['slips', 'float_type'], {}), '(slips, float_type)\n', (6311, 6330), True, 'import cutde.backend as backend\n'), ((6349, 6391), 'cutde.backend.zeros', 'backend.zeros', (['(n_obs * vec_dim)', 'float_type'], {}), '(n_obs * vec_dim, float_type)\n', (6362, 6391), True, 'import cutde.backend as backend\n'), ((6525, 6598), 'cutde.backend.load_module', 'backend.load_module', (['"""free.cu"""'], {'tmpl_args': 'gpu_config', 'tmpl_dir': 'source_dir'}), "('free.cu', tmpl_args=gpu_config, tmpl_dir=source_dir)\n", (6544, 6598), True, 'import cutde.backend as backend\n'), ((6828, 6872), 'numpy.zeros', 'np.zeros', (['(n_obs, vec_dim)'], {'dtype': 'float_type'}), '((n_obs, vec_dim), dtype=float_type)\n', (6836, 6872), True, 'import numpy as np\n'), ((8797, 8819), 'numpy.cumsum', 'np.cumsum', (['block_sizes'], {}), '(block_sizes)\n', (8806, 8819), True, 'import numpy as np\n'), ((8838, 8893), 'numpy.empty', 'np.empty', (['(block_end.shape[0] + 1)'], {'dtype': 'block_end.dtype'}), '(block_end.shape[0] + 1, dtype=block_end.dtype)\n', (8846, 8893), True, 'import numpy as np\n'), ((9026, 9052), 'cutde.backend.max_block_size', 'backend.max_block_size', (['(16)'], {}), '(16)\n', (9048, 9052), True, 'import cutde.backend as backend\n'), ((9133, 9208), 
'cutde.backend.load_module', 'backend.load_module', (['"""blocks.cu"""'], {'tmpl_args': 'gpu_config', 'tmpl_dir': 'source_dir'}), "('blocks.cu', tmpl_args=gpu_config, tmpl_dir=source_dir)\n", (9152, 9208), True, 'import cutde.backend as backend\n'), ((9228, 9268), 'cutde.backend.zeros', 'backend.zeros', (['block_end[-1]', 'float_type'], {}), '(block_end[-1], float_type)\n', (9241, 9268), True, 'import cutde.backend as backend\n'), ((9287, 9318), 'cutde.backend.to', 'backend.to', (['obs_pts', 'float_type'], {}), '(obs_pts, float_type)\n', (9297, 9318), True, 'import cutde.backend as backend\n'), ((9334, 9362), 'cutde.backend.to', 'backend.to', (['tris', 'float_type'], {}), '(tris, float_type)\n', (9344, 9362), True, 'import cutde.backend as backend\n'), ((9383, 9414), 'cutde.backend.to', 'backend.to', (['obs_start', 'np.int32'], {}), '(obs_start, np.int32)\n', (9393, 9414), True, 'import cutde.backend as backend\n'), ((9433, 9462), 'cutde.backend.to', 'backend.to', (['obs_end', 'np.int32'], {}), '(obs_end, np.int32)\n', (9443, 9462), True, 'import cutde.backend as backend\n'), ((9483, 9514), 'cutde.backend.to', 'backend.to', (['src_start', 'np.int32'], {}), '(src_start, np.int32)\n', (9493, 9514), True, 'import cutde.backend as backend\n'), ((9533, 9562), 'cutde.backend.to', 'backend.to', (['src_end', 'np.int32'], {}), '(src_end, np.int32)\n', (9543, 9562), True, 'import cutde.backend as backend\n'), ((9585, 9618), 'cutde.backend.to', 'backend.to', (['block_start', 'np.int32'], {}), '(block_start, np.int32)\n', (9595, 9618), True, 'import cutde.backend as backend\n'), ((4108, 4131), 'numpy.ceil', 'np.ceil', (['(n / block_size)'], {}), '(n / block_size)\n', (4115, 4131), True, 'import numpy as np\n'), ((4595, 4606), 'numpy.int32', 'np.int32', (['n'], {}), '(n)\n', (4603, 4606), True, 'import numpy as np\n'), ((5142, 5169), 'numpy.ceil', 'np.ceil', (['(n_obs / block_size)'], {}), '(n_obs / block_size)\n', (5149, 5169), True, 'import numpy as np\n'), ((5194, 5221), 
'numpy.ceil', 'np.ceil', (['(n_src / block_size)'], {}), '(n_src / block_size)\n', (5201, 5221), True, 'import numpy as np\n'), ((5620, 5635), 'numpy.int32', 'np.int32', (['n_obs'], {}), '(n_obs)\n', (5628, 5635), True, 'import numpy as np\n'), ((5645, 5660), 'numpy.int32', 'np.int32', (['n_src'], {}), '(n_src)\n', (5653, 5660), True, 'import numpy as np\n'), ((6416, 6443), 'numpy.ceil', 'np.ceil', (['(n_obs / block_size)'], {}), '(n_obs / block_size)\n', (6423, 6443), True, 'import numpy as np\n'), ((6784, 6816), 'math.ceil', 'ceil', (['(n_src / default_chunk_size)'], {}), '(n_src / default_chunk_size)\n', (6788, 6816), False, 'from math import ceil\n'), ((7765, 7776), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (7773, 7776), True, 'import numpy as np\n'), ((9930, 9954), 'cutde.backend.get', 'backend.get', (['gpu_results'], {}), '(gpu_results)\n', (9941, 9954), True, 'import cutde.backend as backend\n'), ((2215, 2336), 'warnings.warn', 'warnings.warn', (['f"""The {name} input array has Fortran ordering. Converting to C ordering. This may be expensive."""'], {}), "(\n f'The {name} input array has Fortran ordering. Converting to C ordering. 
This may be expensive.'\n )\n", (2228, 2336), False, 'import warnings\n'), ((2398, 2427), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['out_arr'], {}), '(out_arr)\n', (2418, 2427), True, 'import numpy as np\n'), ((4198, 4230), 'cutde.backend.np_to_c_type', 'backend.np_to_c_type', (['float_type'], {}), '(float_type)\n', (4218, 4230), True, 'import cutde.backend as backend\n'), ((4760, 4784), 'cutde.backend.get', 'backend.get', (['gpu_results'], {}), '(gpu_results)\n', (4771, 4784), True, 'import cutde.backend as backend\n'), ((5256, 5288), 'cutde.backend.np_to_c_type', 'backend.np_to_c_type', (['float_type'], {}), '(float_type)\n', (5276, 5288), True, 'import cutde.backend as backend\n'), ((5819, 5843), 'cutde.backend.get', 'backend.get', (['gpu_results'], {}), '(gpu_results)\n', (5830, 5843), True, 'import cutde.backend as backend\n'), ((6478, 6510), 'cutde.backend.np_to_c_type', 'backend.np_to_c_type', (['float_type'], {}), '(float_type)\n', (6498, 6510), True, 'import cutde.backend as backend\n'), ((7142, 7157), 'numpy.int32', 'np.int32', (['n_obs'], {}), '(n_obs)\n', (7150, 7157), True, 'import numpy as np\n'), ((7171, 7186), 'numpy.int32', 'np.int32', (['n_src'], {}), '(n_src)\n', (7179, 7186), True, 'import numpy as np\n'), ((7200, 7221), 'numpy.int32', 'np.int32', (['chunk_start'], {}), '(chunk_start)\n', (7208, 7221), True, 'import numpy as np\n'), ((7235, 7254), 'numpy.int32', 'np.int32', (['chunk_end'], {}), '(chunk_end)\n', (7243, 7254), True, 'import numpy as np\n'), ((8257, 8296), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['a'], {'dtype': 'np.int32'}), '(a, dtype=np.int32)\n', (8277, 8296), True, 'import numpy as np\n'), ((9086, 9118), 'cutde.backend.np_to_c_type', 'backend.np_to_c_type', (['float_type'], {}), '(float_type)\n', (9106, 9118), True, 'import cutde.backend as backend\n'), ((7445, 7469), 'cutde.backend.get', 'backend.get', (['gpu_results'], {}), '(gpu_results)\n', (7456, 7469), True, 'import cutde.backend as backend\n'), 
((1526, 1663), 'warnings.warn', 'warnings.warn', (['"""The OpenCL implementation being used does not support float64. This will require converting arrays to float32."""'], {}), "(\n 'The OpenCL implementation being used does not support float64. This will require converting arrays to float32.'\n )\n", (1539, 1663), False, 'import warnings\n'), ((7802, 7821), 'numpy.array', 'np.array', (['obs_start'], {}), '(obs_start)\n', (7810, 7821), True, 'import numpy as np\n'), ((1994, 2014), 'numpy.dtype', 'np.dtype', (['float_type'], {}), '(float_type)\n', (2002, 2014), True, 'import numpy as np\n'), ((2059, 2079), 'numpy.dtype', 'np.dtype', (['float_type'], {}), '(float_type)\n', (2067, 2079), True, 'import numpy as np\n')] |
import os
import numpy as np
from dipy.data import get_data
from dipy.reconst.gqi import GeneralizedQSampling
from dipy.reconst.dti import Tensor
from dipy.tracking.propagation import EuDX
from dipy.tracking.propspeed import ndarray_offset
from dipy.tracking.metrics import length
import nibabel as ni
from nose.tools import assert_true, assert_false, \
assert_equal, assert_raises, assert_almost_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
def test_eudx():
    """Regression test: EuDX track lengths from GQS and tensor fits on the
    small_64D dataset, plus a check of the ndarray_offset helper.
    """
    #read bvals,gradients and data
    fimg,fbvals, fbvecs = get_data('small_64D')
    bvals=np.load(fbvals)
    gradients=np.load(fbvecs)
    img =ni.load(fimg)
    data=img.get_data()
    print(data.shape)
    # Fit both reconstruction models on the same data.
    gqs = GeneralizedQSampling(data,bvals,gradients)
    ten = Tensor(data,bvals,gradients,thresh=50)
    # 10 seeds along the volume diagonal: rows (0,0,0), (1,1,1), ... (9,9,9).
    seed_list=np.dot(np.diag(np.arange(10)),np.ones((10,3)))
    # Track with the GQS-derived anisotropy/index volumes.
    iT=iter(EuDX(gqs.qa(),gqs.ind(),seeds=seed_list))
    T=[]
    for t in iT:
        T.append(t)
    # Track with the tensor-derived FA/index volumes.
    iT2=iter(EuDX(ten.fa(),ten.ind(),seeds=seed_list))
    T2=[]
    for t in iT2:
        T2.append(t)
    print('length T ',sum([length(t) for t in T]))
    print('length T2',sum([length(t) for t in T2]))
    print(gqs.QA[1,4,8,0])
    print(gqs.QA.ravel()[ndarray_offset(np.array([1,4,8,0]),np.array(gqs.QA.strides),4,8)])
    # ndarray_offset must compute the same flat index that fancy indexing uses.
    assert_almost_equal(gqs.QA[1,4,8,0], gqs.QA.ravel()[ndarray_offset(np.array([1,4,8,0]),np.array(gqs.QA.strides),4,8)])
    # Golden total track lengths for this dataset (regression values).
    assert_almost_equal(sum([length(t) for t in T ]) , 70.999996185302734,places=3)
    assert_almost_equal(sum([length(t) for t in T2]) , 56.999997615814209,places=3)
def test_eudx_further():
    """Track from 10**4 random seeds on small_101D and verify that no
    streamline ever contains a negative coordinate.
    """
    fimg,fbvals,fbvecs=get_data('small_101D')
    img=ni.load(fimg)
    affine=img.get_affine()
    bvals=np.loadtxt(fbvals)
    gradients=np.loadtxt(fbvecs).T
    data=img.get_data()
    ten=Tensor(data,bvals,gradients,thresh=50)
    x,y,z=data.shape[:3]
    # Draw 10**4 uniformly random seed points inside the volume bounds.
    seeds=np.zeros((10**4,3))
    for i in range(10**4):
        rx=(x-1)*np.random.rand()
        ry=(y-1)*np.random.rand()
        rz=(z-1)*np.random.rand()
        seeds[i]=np.ascontiguousarray(np.array([rx,ry,rz]),dtype=np.float64)
    #print seeds
    #"""
    eu=EuDX(a=ten.fa(),ind=ten.ind(),seeds=seeds,a_low=.2)
    T=[e for e in eu]
    #check that there are no negative elements
    for t in T:
        assert_equal(np.sum(t.ravel()<0),0)
    """
    for (i,t) in enumerate(T):
        for row in t:
            if row[0]<0 or row[1]<0 or row[2]<0:
                print 'l======'
                print i,row
                print t[0]
                print t[-1]
            if row[0]>=data.shape[0] or row[1]>=data.shape[1] or row[2]>=data.shape[2]:
                print 'h======'
                print i,row
                print t[0]
                print t[-1]
    from dipy.viz import fvtk
    r=fvtk.ren()
    fvtk.add(r,fvtk.line(T,fvtk.red))
    fvtk.add(r,fvtk.point(seeds,fvtk.green))
    fvtk.show(r)
    """
def uniform_seed_grid():
    """Track from a uniform grid of voxel-center seeds and count tracks.

    Loads the small 64-direction test dataset, builds one seed at the
    center of every voxel, runs EuDX on the generalized q-sampling fit,
    and asserts the expected number of generated tracks.
    """
    # read bvals, gradients and data
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    img = ni.load(fimg)
    data = img.get_data()

    x, y, z, g = data.shape
    # Complex step in np.mgrid means "number of samples"; use the builtin
    # complex() because the np.complex alias was removed in NumPy 1.24.
    M = np.mgrid[.5:x - .5:complex(0, x),
              .5:y - .5:complex(0, y),
              .5:z - .5:complex(0, z)]
    M = M.reshape(3, x * y * z).T  # one (x, y, z) seed per row

    print(M.shape)
    print(M.dtype)
    for m in M:
        print(m)

    gqs = GeneralizedQSampling(data, bvals, gradients)
    # Bug fix: the original loop appended the undefined name `i` instead of
    # the generated track `t`, which raised NameError at runtime.
    T = [t for t in EuDX(gqs.QA, gqs.IN, seeds=M)]

    print('lenT', len(T))
    assert_equal(len(T), 1221)
| [
"numpy.load",
"dipy.tracking.metrics.length",
"nibabel.load",
"numpy.zeros",
"dipy.data.get_data",
"numpy.ones",
"numpy.arange",
"numpy.loadtxt",
"numpy.array",
"dipy.tracking.propagation.EuDX",
"numpy.random.rand",
"dipy.reconst.gqi.GeneralizedQSampling",
"dipy.reconst.dti.Tensor",
"numpy... | [((568, 589), 'dipy.data.get_data', 'get_data', (['"""small_64D"""'], {}), "('small_64D')\n", (576, 589), False, 'from dipy.data import get_data\n'), ((604, 619), 'numpy.load', 'np.load', (['fbvals'], {}), '(fbvals)\n', (611, 619), True, 'import numpy as np\n'), ((634, 649), 'numpy.load', 'np.load', (['fbvecs'], {}), '(fbvecs)\n', (641, 649), True, 'import numpy as np\n'), ((659, 672), 'nibabel.load', 'ni.load', (['fimg'], {}), '(fimg)\n', (666, 672), True, 'import nibabel as ni\n'), ((742, 786), 'dipy.reconst.gqi.GeneralizedQSampling', 'GeneralizedQSampling', (['data', 'bvals', 'gradients'], {}), '(data, bvals, gradients)\n', (762, 786), False, 'from dipy.reconst.gqi import GeneralizedQSampling\n'), ((802, 843), 'dipy.reconst.dti.Tensor', 'Tensor', (['data', 'bvals', 'gradients'], {'thresh': '(50)'}), '(data, bvals, gradients, thresh=50)\n', (808, 843), False, 'from dipy.reconst.dti import Tensor\n'), ((1735, 1757), 'dipy.data.get_data', 'get_data', (['"""small_101D"""'], {}), "('small_101D')\n", (1743, 1757), False, 'from dipy.data import get_data\n'), ((1766, 1779), 'nibabel.load', 'ni.load', (['fimg'], {}), '(fimg)\n', (1773, 1779), True, 'import nibabel as ni\n'), ((1818, 1836), 'numpy.loadtxt', 'np.loadtxt', (['fbvals'], {}), '(fbvals)\n', (1828, 1836), True, 'import numpy as np\n'), ((1908, 1949), 'dipy.reconst.dti.Tensor', 'Tensor', (['data', 'bvals', 'gradients'], {'thresh': '(50)'}), '(data, bvals, gradients, thresh=50)\n', (1914, 1949), False, 'from dipy.reconst.dti import Tensor\n'), ((1982, 2004), 'numpy.zeros', 'np.zeros', (['(10 ** 4, 3)'], {}), '((10 ** 4, 3))\n', (1990, 2004), True, 'import numpy as np\n'), ((3177, 3198), 'dipy.data.get_data', 'get_data', (['"""small_64D"""'], {}), "('small_64D')\n", (3185, 3198), False, 'from dipy.data import get_data\n'), ((3213, 3228), 'numpy.load', 'np.load', (['fbvals'], {}), '(fbvals)\n', (3220, 3228), True, 'import numpy as np\n'), ((3243, 3258), 'numpy.load', 'np.load', (['fbvecs'], {}), 
'(fbvecs)\n', (3250, 3258), True, 'import numpy as np\n'), ((3268, 3281), 'nibabel.load', 'ni.load', (['fimg'], {}), '(fimg)\n', (3275, 3281), True, 'import nibabel as ni\n'), ((3541, 3585), 'dipy.reconst.gqi.GeneralizedQSampling', 'GeneralizedQSampling', (['data', 'bvals', 'gradients'], {}), '(data, bvals, gradients)\n', (3561, 3585), False, 'from dipy.reconst.gqi import GeneralizedQSampling\n'), ((885, 901), 'numpy.ones', 'np.ones', (['(10, 3)'], {}), '((10, 3))\n', (892, 901), True, 'import numpy as np\n'), ((1851, 1869), 'numpy.loadtxt', 'np.loadtxt', (['fbvecs'], {}), '(fbvecs)\n', (1861, 1869), True, 'import numpy as np\n'), ((3596, 3625), 'dipy.tracking.propagation.EuDX', 'EuDX', (['gqs.QA', 'gqs.IN'], {'seeds': 'M'}), '(gqs.QA, gqs.IN, seeds=M)\n', (3600, 3625), False, 'from dipy.tracking.propagation import EuDX\n'), ((870, 883), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (879, 883), True, 'import numpy as np\n'), ((2046, 2062), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2060, 2062), True, 'import numpy as np\n'), ((2080, 2096), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2094, 2096), True, 'import numpy as np\n'), ((2114, 2130), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2128, 2130), True, 'import numpy as np\n'), ((2181, 2203), 'numpy.array', 'np.array', (['[rx, ry, rz]'], {}), '([rx, ry, rz])\n', (2189, 2203), True, 'import numpy as np\n'), ((1148, 1157), 'dipy.tracking.metrics.length', 'length', (['t'], {}), '(t)\n', (1154, 1157), False, 'from dipy.tracking.metrics import length\n'), ((1201, 1210), 'dipy.tracking.metrics.length', 'length', (['t'], {}), '(t)\n', (1207, 1210), False, 'from dipy.tracking.metrics import length\n'), ((1296, 1318), 'numpy.array', 'np.array', (['[1, 4, 8, 0]'], {}), '([1, 4, 8, 0])\n', (1304, 1318), True, 'import numpy as np\n'), ((1316, 1340), 'numpy.array', 'np.array', (['gqs.QA.strides'], {}), '(gqs.QA.strides)\n', (1324, 1340), True, 'import numpy as np\n'), ((1420, 
1442), 'numpy.array', 'np.array', (['[1, 4, 8, 0]'], {}), '([1, 4, 8, 0])\n', (1428, 1442), True, 'import numpy as np\n'), ((1440, 1464), 'numpy.array', 'np.array', (['gqs.QA.strides'], {}), '(gqs.QA.strides)\n', (1448, 1464), True, 'import numpy as np\n'), ((1502, 1511), 'dipy.tracking.metrics.length', 'length', (['t'], {}), '(t)\n', (1508, 1511), False, 'from dipy.tracking.metrics import length\n'), ((1586, 1595), 'dipy.tracking.metrics.length', 'length', (['t'], {}), '(t)\n', (1592, 1595), False, 'from dipy.tracking.metrics import length\n'), ((3365, 3381), 'numpy.complex', 'np.complex', (['(0)', 'x'], {}), '(0, x)\n', (3375, 3381), True, 'import numpy as np\n'), ((3389, 3405), 'numpy.complex', 'np.complex', (['(0)', 'y'], {}), '(0, y)\n', (3399, 3405), True, 'import numpy as np\n'), ((3413, 3429), 'numpy.complex', 'np.complex', (['(0)', 'z'], {}), '(0, z)\n', (3423, 3429), True, 'import numpy as np\n')] |
"""feature_importance.py
Computes feature contribution scores via DeepLIFT (Shrikumar et al., 2016) &
determines most important features via paired t-test with adjustment
for multiple comparisons (Bonferroni correction) using said scores.
Requires: NumPy, SciPy, DeepLIFT (and their dependencies)
Author: <NAME>, Rzhetsky Lab
Copyright: 2018, all rights reserved
"""
from __future__ import print_function
from collections import OrderedDict
from os.path import abspath
from os.path import dirname
import sys
import time
import numpy as np
from scipy import stats
from .models import chunks
from .summary import Summary
# import deeplift, configure path if not already installed
sys.path.append(dirname(dirname(abspath(__file__))) + '/deeplift')
from deeplift.conversion import kerasapi_conversion as kc
from deeplift.layers import NonlinearMxtsMode
# how to handle floating pt errs
np.seterr(divide='ignore', over='raise', under='raise')
class FeatureImportanceSummary(Summary):
    """Tabular summary of feature importance from DeepLIFT paired t-tests."""

    def __init__(self, sums_D, sums_D2, idx_feat_dict, idx_class_dict,
                 icd9_descript_dict, pairs, num_sample):
        """Build the summary table of per-feature significance scores.

        Arguments:
        sums_D: np.ndarray, float
            shape (num_pair, num_feature); for each pair of compared classes,
            the sum over samples of differences in DeepLIFT contribution
            scores, per feature
        sums_D2: np.ndarray, float
            shape (num_pair, num_feature); for each pair of compared classes,
            the sum over samples of squared differences in DeepLIFT
            contribution scores, per feature
        idx_feat_dict: {int: string}
            dictionary mapping feature indices to features
        idx_class_dict: {int: string}
            dictionary mapping class indices to classes
        icd9_descript_dict: {string: string}
            dictionary mapping ICD9 codes to description text
        pairs: [(int, int)]
            list of pairs of classes which were compared during interpretation
        num_sample: int
            number of samples present in the dataset
        """
        n_feature = len(idx_feat_dict)
        n_pair = len(pairs)

        t_values, p_values = _paired_ttest_with_diff_sums(
            sums_D, sums_D2, pairs=pairs, num_sample=num_sample)

        list_t, list_p = _get_list_signif_scores(t_values, p_values)
        list_pairs = _get_list_pairs(
            pairs, idx_class_dict=idx_class_dict, num_feature=n_feature)
        list_names = _get_list_feat_names(idx_feat_dict, n_pair)
        list_descripts = _get_list_feat_descripts(
            list_names, icd9_descript_dict=icd9_descript_dict)

        # Column order defines the layout of the emitted summary table.
        super(FeatureImportanceSummary, self).__init__(OrderedDict([
            ('feat', list_names),
            ('descript', list_descripts), ('pair', list_pairs),
            ('unadjusted_t', list_t), ('p', list_p)]))
def get_diff_sums(hdf5_path, x_test, process_x_func, num_feature, num_class,
                  batch_size=1024):
    """Get differences in sums of contribution score values.

    Streams DeepLIFT contribution scores for the saved model over `x_test`
    and accumulates the sums needed for paired t-tests between classes.

    Arguments:
    hdf5_path: str
        path to saved HDF5 Keras Model
    x_test: feature data to be scored (vectorized batch-by-batch)
    process_x_func: function
        function for vectorizing feature data
    num_feature: int
        number of features present in the dataset
    num_class: int
        number of classes
    batch_size: int
        batch size

    Returns:
    sums_D: np.ndarray, float
        shape (num_pair, num_feature); per-pair sums of differences in
        DeepLIFT contribution scores across features
    sums_D2: np.ndarray, float
        shape (num_pair, num_feature); per-pair sums of squared differences
        in DeepLIFT contribution scores across features
    sums: np.ndarray, float
        shape (num_class, num_feature); per-class sums of DeepLIFT
        contribution scores across features
    pairs: [(int, int)]
        list of pairs of classes which were compared during interpretation
    """
    score_generator = _deeplift_contribs_generator(
        hdf5_path, x_test, process_x_func, num_feature=num_feature,
        num_class=num_class, batch_size=batch_size)

    return _diff_sums_from_generator(
        score_generator, num_feature=num_feature, num_class=num_class)
def _deeplift_contribs_generator(hdf5_path, x_test, process_x_func,
                                 num_feature, num_class, batch_size):
    """Generator which yields DeepLIFT contribution scores.

    Applies vectorization batch-by-batch to avoid memory overflow.

    Arguments:
        hdf5_path: str
            path to saved HDF5 Keras Model
        x_test: feature data to be scored, consumed in chunks of `batch_size`
        process_x_func: function
            function for vectorizing feature data
        num_feature: int
            number of features present in the dataset
        num_class: int
            number of classes
        batch_size: int
            batch size

    Yields:
        all_batch_contribs: np.ndarray, float
            contribution scores for one batch, with shape
            (num_class, actual_batch_size, num_feature)
    """
    # convert Keras model, and get relevant function
    deeplift_model = kc.convert_model_from_saved_files(
        hdf5_path, nonlinear_mxts_mode=NonlinearMxtsMode.RevealCancel)
    # input layer is 0, since we have a softmax layer the target layer is -2
    get_deeplift_contribs = deeplift_model.get_target_contribs_func(
        find_scores_layer_idx=0, target_layer_idx=-2)
    # approximate number of batches, used only for progress reporting
    num_batch = int(round(float(len(x_test)) / batch_size))
    # yield a 3D array detailing the DeepLIFT contrib scores
    for batch_idx, x in enumerate(chunks(x_test, batch_size)):
        start = time.time()
        x = process_x_func(x)
        # NOTE: rebinds the `batch_size` parameter to the actual length of
        # this (possibly final, shorter) chunk; num_batch was computed above
        # from the original value, so reporting is unaffected.
        batch_size = len(x)
        zeros = [0.0] * batch_size  # reference data (all-zero baseline)
        all_batch_contribs = np.zeros((num_class, batch_size, num_feature))
        # one DeepLIFT pass per target class
        for c in range(num_class):
            batch_contribs = get_deeplift_contribs(
                task_idx=c, input_data_list=[x], input_references_list=zeros,
                batch_size=1024, progress_update=None)
            all_batch_contribs[c] = batch_contribs
        # progress report every 10th batch
        if not batch_idx % 10:
            print('{}/{} in {:.2f} s'.format(batch_idx, num_batch,
                                             time.time() - start))
        yield all_batch_contribs
def _diff_sums_from_generator(generator, num_feature, num_class):
"""Computes sums of DeepLIFT contribution scores from a generator.
Arguments:
generator: generator
generator which yields DeepLIFT contribution scores.
num_feature: int
number of features present in the dataset
num_class: int
number of classes
Returns:
sums_D: np.ndarray, float
2-D array of sums of differences in DeepLIFT contribution scores
with shape (num_pair, num_feature); the outer (0) dim represents the
pair of compared classes, and the inner dim (1) represents the sum
of differences in scores across features
sums_D2: np.ndarray, float
2-D array of sums of squared differences in DeepLIFT contribution
scores with shape (num_pair, num_feature); the outer (0) dim
represents the pair of compared classes, and the inner dim (1)
represents the sum of squared differences in scores across features
sums: np.ndarray, float
2-D array of sums of DeepLIFT contribution scores with shape
(num_class, num_feature); the outer (0) dim represents the pair of
compared classes, and the inner dim (1) represents the sum of
differences in scores across features
pairs: [(int, int)]
list of pairs of classes which were compared during interpretation
"""
# find unique pairs
pairs = [[(i, j) for j in range(i + 1, num_class)]
for i in range(num_class)]
pairs = [p for sublist in pairs for p in sublist] # flatten
num_pair = len(pairs)
# array of running sums of differences (D) and D^2 (D2)
# for each pair (row) for each feature (column)
running_sums_D = np.zeros((num_pair, num_feature))
running_sums_D2 = np.zeros((num_pair, num_feature))
# array of running sums of contribution scores
# for each class (row) for each feature (column)
running_sums_contribs = np.zeros((num_class, num_feature))
# compute running sums for each pair of classes and their D, D2 values,
# updating these values batch-by-batch
for _, batch_contrib_scores in enumerate(generator):
for class_idx in range(num_class):
contribs = batch_contrib_scores[class_idx]
# if only 1 row (e.g., vector), do not sum, will sum all elements
if contribs.ndim > 1:
contribs = np.sum(contribs, axis=0)
running_sums_contribs[class_idx] = np.add(
running_sums_contribs[class_idx], contribs)
for pair_idx, (i, j) in enumerate(pairs):
D = np.subtract(batch_contrib_scores[i], batch_contrib_scores[j])
D2 = np.square(D)
# if only 1 row (e.g., vector), do not sum, will sum all elements
assert D.ndim == D2.ndim
if D.ndim > 1:
D = np.sum(D, axis=0)
D2 = np.sum(D2, axis=0)
assert D.shape == (num_feature, )
assert D2.shape == (num_feature, )
running_sums_D[pair_idx] = np.add(running_sums_D[pair_idx], D)
running_sums_D2[pair_idx] = np.add(running_sums_D2[pair_idx], D2)
return running_sums_D, running_sums_D2, running_sums_contribs, pairs
def _paired_ttest_with_diff_sums(sums_D, sums_D2, pairs, num_sample):
    """Performs paired t-tests with sums of differences, D and D^2.

    Arguments:
        sums_D: np.ndarray, float
            2-D array of sums of differences with shape
            (num_pair, num_feature); the outer (0) dim represents the pair of
            compared classes
        sums_D2: np.ndarray, float
            2-D array of sums of squared differences with shape
            (num_pair, num_feature)
        pairs: [(int, int)]
            list of pairs of classes which were compared during interpretation
        num_sample: int
            number of samples

    Returns:
        unadjusted_t_values: np.ndarray, float
            2-D array of unadjusted T values with shape
            (num_pair, num_feature)
        p_values: np.ndarray, float
            2-D array of Bonferroni-adjusted p-values with shape
            (num_pair, num_feature)
    """
    num_pair = len(pairs)
    sums_D = np.asarray(sums_D)
    sums_D2 = np.asarray(sums_D2)
    num_feature = sums_D.shape[1]

    assert np.all(~np.isnan(sums_D))
    assert np.all(~np.isnan(sums_D2))

    N = float(num_sample)
    N_minus_1 = float(num_sample - 1)

    # paired t-test formula from sums of differences, vectorized over all
    # pairs at once (replaces the original per-pair Python loop):
    # t = sum(D) / sqrt((N * sum(D^2) - sum(D)^2) / (N - 1))
    unadjusted_t_values = sums_D / np.sqrt(
        (sums_D2 * N - sums_D * sums_D) / N_minus_1)

    dof = num_sample - 1  # degrees of freedom
    # compute two-sided p-value, e.g., Pr(abs(t) > tt)
    unadjusted_p_values = stats.t.sf(np.abs(unadjusted_t_values), dof) * 2
    assert unadjusted_p_values.shape == (num_pair, num_feature)

    # apply Bonferroni adjustment to p-values (multiply by # comparisons)
    num_comparison = num_pair * num_feature
    p_values = _bonferroni(unadjusted_p_values, num_comparison=num_comparison)
    assert p_values.shape == (num_pair, num_feature)

    return unadjusted_t_values, p_values
def _bonferroni(p_values, num_comparison):
"""Applies Bonferroni adjustment to p-values.
Arguments:
p_values: np.ndarray, float
array of p-values
num_comparison:
number of comparisons
Returns:
adjusted_p_values: np.ndarray, float
array of adjusted p-values with the same shape as p_values
"""
adjust = np.vectorize(lambda pv: min(1.0, pv * num_comparison))
adjusted_p_values = adjust(p_values)
assert np.all(adjusted_p_values[~np.isnan(adjusted_p_values)] <= 1.0)
assert np.all(adjusted_p_values[~np.isnan(adjusted_p_values)] >= 0.0)
return adjusted_p_values
def _get_list_signif_scores(unadjusted_t_values, p_values):
"""Creates two flattened lists of unadjusted T and adjusted p-values.
Flattens arrays so that scores corresponding to the same pair of compared
classes are contiguous, e.g., [f0_p0, f1_p0, f2_p0, f0_p1, f1_p1, ...].
Arguments:
unadjusted_t_values: np.ndarray, float
2-D array of unadjusted T values with shape (num_pair, num_feature);
the outer (0) dim represents the pair of compared classes, and the
inner dim (1) represents the T value across features
p_values: np.ndarray, float
2-D array of adjusted p-values with shape (num_pair, num_feature);
the outer (0) dim represents the pair of compared classes, and the
inner dim (1) represents the adjusted p-value across features
Returns:
list_unadjusted_t [float]
list of unadjusted T values with length num_feature * num_pair
list_p: [float]
list of adjusted p-values with length num_feature * num_pair
"""
num_pair = unadjusted_t_values.shape[0]
num_feature = unadjusted_t_values.shape[1]
# flatten nested lists ('C' for row-major, e.g. C style)
# e.g., np.array([[1, 2, 3], [4, 5, 6]]) => np.array([1, 2, 3, 4, 5, 6])
# e.g., corresponds to concatenated rows [row0_col0, row1_col0, row2_col0,
# row0_col1, row1_col1, row2_col1, row0_col2, row1_col2, row2_col2]
flat_utv = unadjusted_t_values.flatten('C')
flat_pv = p_values.flatten('C')
assert flat_utv.shape == (num_feature * num_pair, )
assert flat_pv.shape == (num_feature * num_pair, )
return flat_utv.tolist(), flat_pv.tolist()
def _get_list_pairs(pairs, idx_class_dict, num_feature):
"""Creates flattened list of (repeated) pairs.
The indexing corresponds with the flattened list of T values and the
flattened list of p-values obtained from _get_list_signif_scores().
Arguments:
pairs: [(int, int)]
list of pairs of classes which were compared during interpretation
idx_class_dict: {int: string}
dictionary mapping class indices to classes
num_feature: int
number of features
Returns:
list_pairs: [(string, string)]
list of pairs of compared classes with length num_feature * num_pair
"""
list_pairs = [[p] * num_feature for p in pairs]
list_pairs = [p for sublist in list_pairs for p in sublist] # flatten
list_pairs = [[idx_class_dict[p[0]], idx_class_dict[p[1]]]
for p in list_pairs] # lookup class
return list_pairs
def _get_list_feat_names(idx_feat_dict, num_pair):
"""Creates flattened list of (repeated) feature names.
The indexing corresponds with the flattened list of T values and the
flattened list of p-values obtained from _get_list_signif_scores().
Arguments:
idx_feat_dict: {int: string}
dictionary mapping feature indices to faetures
num_class: int
number of classes
Returns:
list_feat_names: [string]
list of feature names with length num_feature * num_pair
"""
num_feature = len(idx_feat_dict)
return [idx_feat_dict[feat_idx] for feat_idx in range(num_feature)] \
* num_pair
def _get_list_feat_descripts(list_feat_names, icd9_descript_dict):
"""Creates flattened list of (repeated) feature descriptions.
The indexing corresponds with the flattened list of T values and the
flattened list of p-values obtained from _get_list_signif_scores().
Arguments:
list_feat_names: [string]
list of feature names corresponding with length
num_feature * num_pair
icd9_descript_dict: {string: string}
dictionary mapping ICD9 codes to description text
Returns:
list_feat_descripts: [string]
list of feature descriptions with length num_feature * num_pair
"""
# returns the description for a feature; expects the string feature name
def _get_descript(feat, icd9_descript_dict):
if feat[:6] == 'gender':
return 'gender'
elif feat[:3] == 'age':
return 'age on record'
elif feat in icd9_descript_dict:
return icd9_descript_dict[feat]
raise ValueError('`{}` not age/gender; not found in icd9_descript_dict'
.format(feat))
list_feat_descripts = [
_get_descript(f, icd9_descript_dict=icd9_descript_dict)
for f in list_feat_names]
return list_feat_descripts
| [
"os.path.abspath",
"numpy.abs",
"numpy.subtract",
"numpy.sum",
"numpy.seterr",
"numpy.empty",
"numpy.square",
"numpy.zeros",
"numpy.isnan",
"time.time",
"deeplift.conversion.kerasapi_conversion.convert_model_from_saved_files",
"collections.OrderedDict",
"numpy.add",
"numpy.sqrt"
] | [((898, 953), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'over': '"""raise"""', 'under': '"""raise"""'}), "(divide='ignore', over='raise', under='raise')\n", (907, 953), True, 'import numpy as np\n'), ((6294, 6395), 'deeplift.conversion.kerasapi_conversion.convert_model_from_saved_files', 'kc.convert_model_from_saved_files', (['hdf5_path'], {'nonlinear_mxts_mode': 'NonlinearMxtsMode.RevealCancel'}), '(hdf5_path, nonlinear_mxts_mode=\n NonlinearMxtsMode.RevealCancel)\n', (6327, 6395), True, 'from deeplift.conversion import kerasapi_conversion as kc\n'), ((9299, 9332), 'numpy.zeros', 'np.zeros', (['(num_pair, num_feature)'], {}), '((num_pair, num_feature))\n', (9307, 9332), True, 'import numpy as np\n'), ((9355, 9388), 'numpy.zeros', 'np.zeros', (['(num_pair, num_feature)'], {}), '((num_pair, num_feature))\n', (9363, 9388), True, 'import numpy as np\n'), ((9521, 9555), 'numpy.zeros', 'np.zeros', (['(num_class, num_feature)'], {}), '((num_class, num_feature))\n', (9529, 9555), True, 'import numpy as np\n'), ((12392, 12425), 'numpy.empty', 'np.empty', (['(num_pair, num_feature)'], {}), '((num_pair, num_feature))\n', (12400, 12425), True, 'import numpy as np\n'), ((6801, 6812), 'time.time', 'time.time', ([], {}), '()\n', (6810, 6812), False, 'import time\n'), ((6953, 6999), 'numpy.zeros', 'np.zeros', (['(num_class, batch_size, num_feature)'], {}), '((num_class, batch_size, num_feature))\n', (6961, 6999), True, 'import numpy as np\n'), ((3174, 3327), 'collections.OrderedDict', 'OrderedDict', (["[('feat', list_feat_names), ('descript', list_feat_descripts), ('pair',\n list_pairs), ('unadjusted_t', list_unadjusted_t), ('p', list_p)]"], {}), "([('feat', list_feat_names), ('descript', list_feat_descripts),\n ('pair', list_pairs), ('unadjusted_t', list_unadjusted_t), ('p', list_p)])\n", (3185, 3327), False, 'from collections import OrderedDict\n'), ((10044, 10094), 'numpy.add', 'np.add', (['running_sums_contribs[class_idx]', 'contribs'], {}), 
'(running_sums_contribs[class_idx], contribs)\n', (10050, 10094), True, 'import numpy as np\n'), ((10179, 10240), 'numpy.subtract', 'np.subtract', (['batch_contrib_scores[i]', 'batch_contrib_scores[j]'], {}), '(batch_contrib_scores[i], batch_contrib_scores[j])\n', (10190, 10240), True, 'import numpy as np\n'), ((10258, 10270), 'numpy.square', 'np.square', (['D'], {}), '(D)\n', (10267, 10270), True, 'import numpy as np\n'), ((10626, 10661), 'numpy.add', 'np.add', (['running_sums_D[pair_idx]', 'D'], {}), '(running_sums_D[pair_idx], D)\n', (10632, 10661), True, 'import numpy as np\n'), ((10702, 10739), 'numpy.add', 'np.add', (['running_sums_D2[pair_idx]', 'D2'], {}), '(running_sums_D2[pair_idx], D2)\n', (10708, 10739), True, 'import numpy as np\n'), ((12782, 12831), 'numpy.sqrt', 'np.sqrt', (['((sum_D2 * N - sum_D * sum_D) / N_minus_1)'], {}), '((sum_D2 * N - sum_D * sum_D) / N_minus_1)\n', (12789, 12831), True, 'import numpy as np\n'), ((13015, 13042), 'numpy.abs', 'np.abs', (['unadjusted_t_values'], {}), '(unadjusted_t_values)\n', (13021, 13042), True, 'import numpy as np\n'), ((725, 742), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (732, 742), False, 'from os.path import abspath\n'), ((9971, 9995), 'numpy.sum', 'np.sum', (['contribs'], {'axis': '(0)'}), '(contribs, axis=0)\n', (9977, 9995), True, 'import numpy as np\n'), ((10434, 10451), 'numpy.sum', 'np.sum', (['D'], {'axis': '(0)'}), '(D, axis=0)\n', (10440, 10451), True, 'import numpy as np\n'), ((10473, 10491), 'numpy.sum', 'np.sum', (['D2'], {'axis': '(0)'}), '(D2, axis=0)\n', (10479, 10491), True, 'import numpy as np\n'), ((12573, 12588), 'numpy.isnan', 'np.isnan', (['sum_D'], {}), '(sum_D)\n', (12581, 12588), True, 'import numpy as np\n'), ((12613, 12629), 'numpy.isnan', 'np.isnan', (['sum_D2'], {}), '(sum_D2)\n', (12621, 12629), True, 'import numpy as np\n'), ((13932, 13959), 'numpy.isnan', 'np.isnan', (['adjusted_p_values'], {}), '(adjusted_p_values)\n', (13940, 13959), True, 'import 
numpy as np\n'), ((14006, 14033), 'numpy.isnan', 'np.isnan', (['adjusted_p_values'], {}), '(adjusted_p_values)\n', (14014, 14033), True, 'import numpy as np\n'), ((7416, 7427), 'time.time', 'time.time', ([], {}), '()\n', (7425, 7427), False, 'import time\n')] |
"""Unit tests for prediction_io.py."""
import copy
import unittest
import numpy
from gewittergefahr.gg_utils import time_conversion
from ml4tc.io import prediction_io
# Absolute tolerance for floating-point comparisons in this test module.
TOLERANCE = 1e-6

# The following constants are used to test subset*.
TARGET_CLASSES = numpy.array([0, 1, 2, 2, 1, 0, 0, 2, 1, 1, 0, 2], dtype=int)
# One row per example; each row is a probability distribution over 3 classes.
FORECAST_PROB_MATRIX = numpy.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1. / 3, 1. / 3, 1. / 3],
    [0.5, 0.25, 0.25],
    [0.4, 0.4, 0.2],
    [0.6, 0.2, 0.2],
    [0.2, 0.1, 0.7],
    [0.8, 0.1, 0.1],
    [0.7, 0.15, 0.15],
    [0.2, 0.5, 0.3],
    [0.9, 0.1, 0]
])
MODEL_FILE_NAME = 'foo'
# Cyclone IDs cover each basin prefix twice (AL, WP, CP, EP, IO, SH).
CYCLONE_ID_STRINGS = [
    '2005AL01', '2005WP01', '2005CP01', '2005EP01', '2005IO01', '2005SH01',
    '2005AL02', '2005WP02', '2005CP02', '2005EP02', '2005IO02', '2005SH02'
]
# One init time per month of 2005, so subsetting by month keeps one example.
INIT_TIME_STRINGS = [
    '2005-01-01', '2005-02-02', '2005-03-03', '2005-04-04',
    '2005-05-05', '2005-06-06', '2005-07-07', '2005-08-08',
    '2005-09-09', '2005-10-10', '2005-11-11', '2005-12-12'
]
LATITUDES_DEG_N = numpy.full(12, 53.5)
LONGITUDES_DEG_E = numpy.full(12, 246.5)
INIT_TIMES_UNIX_SEC = numpy.array([
    time_conversion.string_to_unix_sec(t, '%Y-%m-%d') for t in INIT_TIME_STRINGS
], dtype=int)

# Full dictionary from which subsets are taken (copies, so the originals
# stay unmodified).
FULL_PREDICTION_DICT = {
    prediction_io.TARGET_CLASSES_KEY: TARGET_CLASSES + 0,
    prediction_io.PROBABILITY_MATRIX_KEY: FORECAST_PROB_MATRIX + 0.,
    prediction_io.CYCLONE_IDS_KEY: copy.deepcopy(CYCLONE_ID_STRINGS),
    prediction_io.STORM_LATITUDES_KEY: LATITUDES_DEG_N + 0.,
    prediction_io.STORM_LONGITUDES_KEY: LONGITUDES_DEG_E + 0.,
    prediction_io.INIT_TIMES_KEY: INIT_TIMES_UNIX_SEC + 0,
    prediction_io.MODEL_FILE_KEY: MODEL_FILE_NAME
}

# Only example 10 has a November init time.
DESIRED_MONTH = 11
THESE_INDICES = numpy.array([10], dtype=int)

PREDICTION_DICT_SUBSET_BY_MONTH = {
    prediction_io.TARGET_CLASSES_KEY: TARGET_CLASSES[THESE_INDICES],
    prediction_io.PROBABILITY_MATRIX_KEY:
        FORECAST_PROB_MATRIX[THESE_INDICES, ...],
    prediction_io.CYCLONE_IDS_KEY:
        [CYCLONE_ID_STRINGS[k] for k in THESE_INDICES],
    prediction_io.STORM_LATITUDES_KEY: LATITUDES_DEG_N[THESE_INDICES],
    prediction_io.STORM_LONGITUDES_KEY: LONGITUDES_DEG_E[THESE_INDICES],
    prediction_io.INIT_TIMES_KEY: INIT_TIMES_UNIX_SEC[THESE_INDICES],
    prediction_io.MODEL_FILE_KEY: MODEL_FILE_NAME
}

# Basin 'SL' matches no cyclone ID above, so the subset is empty.
DESIRED_BASIN_ID_STRING = 'SL'
THESE_INDICES = numpy.array([], dtype=int)

PREDICTION_DICT_SUBSET_BY_BASIN = {
    prediction_io.TARGET_CLASSES_KEY: TARGET_CLASSES[THESE_INDICES],
    prediction_io.PROBABILITY_MATRIX_KEY:
        FORECAST_PROB_MATRIX[THESE_INDICES, ...],
    prediction_io.CYCLONE_IDS_KEY:
        [CYCLONE_ID_STRINGS[k] for k in THESE_INDICES],
    prediction_io.STORM_LATITUDES_KEY: LATITUDES_DEG_N[THESE_INDICES],
    prediction_io.STORM_LONGITUDES_KEY: LONGITUDES_DEG_E[THESE_INDICES],
    prediction_io.INIT_TIMES_KEY: INIT_TIMES_UNIX_SEC[THESE_INDICES],
    prediction_io.MODEL_FILE_KEY: MODEL_FILE_NAME
}

# The following constants are used to test find_file and file_name_to_metadata.
DIRECTORY_NAME = 'foo'
MONTH = 12
BASIN_ID_STRING = 'SH'
GRID_ROW = 0
GRID_COLUMN = 666

FILE_NAME_DEFAULT = 'foo/predictions.nc'
FILE_NAME_MONTHLY = 'foo/predictions_month=12.nc'
FILE_NAME_BASIN_SPECIFIC = 'foo/predictions_basin-id-string=SH.nc'
FILE_NAME_SPATIAL = (
    'foo/grid-row=000/predictions_grid-row=000_grid-column=666.nc'
)

# Expected metadata parsed back from each file name above.
METADATA_DICT_DEFAULT = {
    prediction_io.MONTH_KEY: None,
    prediction_io.BASIN_ID_KEY: None,
    prediction_io.GRID_ROW_KEY: None,
    prediction_io.GRID_COLUMN_KEY: None
}
METADATA_DICT_MONTHLY = {
    prediction_io.MONTH_KEY: 12,
    prediction_io.BASIN_ID_KEY: None,
    prediction_io.GRID_ROW_KEY: None,
    prediction_io.GRID_COLUMN_KEY: None
}
METADATA_DICT_BASIN_SPECIFIC = {
    prediction_io.MONTH_KEY: None,
    prediction_io.BASIN_ID_KEY: 'SH',
    prediction_io.GRID_ROW_KEY: None,
    prediction_io.GRID_COLUMN_KEY: None
}
METADATA_DICT_SPATIAL = {
    prediction_io.MONTH_KEY: None,
    prediction_io.BASIN_ID_KEY: None,
    prediction_io.GRID_ROW_KEY: 0,
    prediction_io.GRID_COLUMN_KEY: 666
}
def _compare_prediction_dicts(first_prediction_dict, second_prediction_dict):
    """Compares two dictionaries with predicted and actual target values.

    :param first_prediction_dict: See doc for `prediction_io.read_file`.
    :param second_prediction_dict: Same.
    :return: are_dicts_equal: Boolean flag.
    """
    first_keys = list(first_prediction_dict.keys())
    # Bug fix: this previously read keys from *first*_prediction_dict, which
    # made the key-set check vacuous.
    second_keys = list(second_prediction_dict.keys())
    if set(first_keys) != set(second_keys):
        return False

    # Float-valued arrays are compared with tolerance.  Latitudes/longitudes
    # were previously never compared despite being present in the fixtures.
    float_keys = [
        prediction_io.PROBABILITY_MATRIX_KEY,
        prediction_io.STORM_LATITUDES_KEY, prediction_io.STORM_LONGITUDES_KEY
    ]
    for this_key in float_keys:
        if not numpy.allclose(
                first_prediction_dict[this_key],
                second_prediction_dict[this_key],
                atol=TOLERANCE
        ):
            return False

    # Integer arrays must match exactly.
    these_keys = [
        prediction_io.TARGET_CLASSES_KEY, prediction_io.INIT_TIMES_KEY
    ]
    for this_key in these_keys:
        if not numpy.array_equal(
                first_prediction_dict[this_key],
                second_prediction_dict[this_key]
        ):
            return False

    # Non-array entries use plain equality.
    these_keys = [prediction_io.CYCLONE_IDS_KEY, prediction_io.MODEL_FILE_KEY]
    for this_key in these_keys:
        if (
                first_prediction_dict[this_key] !=
                second_prediction_dict[this_key]
        ):
            return False

    return True
class PredictionIoTests(unittest.TestCase):
    """Unit tests for prediction_io.py (one method per public function)."""

    def test_subset_by_month(self):
        """Verifies subset_by_month against the expected subset."""
        subset_dict = prediction_io.subset_by_month(
            prediction_dict=copy.deepcopy(FULL_PREDICTION_DICT),
            desired_month=DESIRED_MONTH
        )
        self.assertTrue(_compare_prediction_dicts(
            subset_dict, PREDICTION_DICT_SUBSET_BY_MONTH
        ))

    def test_subset_by_basin(self):
        """Verifies subset_by_basin against the expected subset."""
        subset_dict = prediction_io.subset_by_basin(
            prediction_dict=copy.deepcopy(FULL_PREDICTION_DICT),
            desired_basin_id_string=DESIRED_BASIN_ID_STRING
        )
        self.assertTrue(_compare_prediction_dicts(
            subset_dict, PREDICTION_DICT_SUBSET_BY_BASIN
        ))

    def test_find_file_default(self):
        """Verifies find_file with default metadata (no time/space split)."""
        actual_file_name = prediction_io.find_file(
            directory_name=DIRECTORY_NAME, raise_error_if_missing=False
        )
        self.assertEqual(actual_file_name, FILE_NAME_DEFAULT)

    def test_find_file_monthly(self):
        """Verifies find_file when splitting by month."""
        actual_file_name = prediction_io.find_file(
            directory_name=DIRECTORY_NAME, month=MONTH,
            raise_error_if_missing=False
        )
        self.assertEqual(actual_file_name, FILE_NAME_MONTHLY)

    def test_find_file_basin_specific(self):
        """Verifies find_file when splitting by basin."""
        actual_file_name = prediction_io.find_file(
            directory_name=DIRECTORY_NAME, basin_id_string=BASIN_ID_STRING,
            raise_error_if_missing=False
        )
        self.assertEqual(actual_file_name, FILE_NAME_BASIN_SPECIFIC)

    def test_find_file_spatial(self):
        """Verifies find_file when splitting by space."""
        actual_file_name = prediction_io.find_file(
            directory_name=DIRECTORY_NAME, grid_row=GRID_ROW,
            grid_column=GRID_COLUMN, raise_error_if_missing=False
        )
        self.assertEqual(actual_file_name, FILE_NAME_SPATIAL)

    def test_file_name_to_metadata_default(self):
        """Verifies file_name_to_metadata with default metadata."""
        metadata_dict = prediction_io.file_name_to_metadata(FILE_NAME_DEFAULT)
        self.assertEqual(metadata_dict, METADATA_DICT_DEFAULT)

    def test_file_name_to_metadata_monthly(self):
        """Verifies file_name_to_metadata for a monthly file name."""
        metadata_dict = prediction_io.file_name_to_metadata(FILE_NAME_MONTHLY)
        self.assertEqual(metadata_dict, METADATA_DICT_MONTHLY)

    def test_file_name_to_metadata_basin_specific(self):
        """Verifies file_name_to_metadata for a basin-specific file name."""
        metadata_dict = prediction_io.file_name_to_metadata(
            FILE_NAME_BASIN_SPECIFIC
        )
        self.assertEqual(metadata_dict, METADATA_DICT_BASIN_SPECIFIC)

    def test_file_name_to_metadata_spatial(self):
        """Verifies file_name_to_metadata for a spatially split file name."""
        metadata_dict = prediction_io.file_name_to_metadata(FILE_NAME_SPATIAL)
        self.assertEqual(metadata_dict, METADATA_DICT_SPATIAL)
# Run the unit tests when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.full",
"unittest.main",
"copy.deepcopy",
"numpy.allclose",
"numpy.array",
"ml4tc.io.prediction_io.find_file",
"numpy.array_equal",
"ml4tc.io.prediction_io.file_name_to_metadata",
"gewittergefahr.gg_utils.time_conversion.string_to_unix_sec"
] | [((256, 316), 'numpy.array', 'numpy.array', (['[0, 1, 2, 2, 1, 0, 0, 2, 1, 1, 0, 2]'], {'dtype': 'int'}), '([0, 1, 2, 2, 1, 0, 0, 2, 1, 1, 0, 2], dtype=int)\n', (267, 316), False, 'import numpy\n'), ((340, 562), 'numpy.array', 'numpy.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1], [1.0 / 3, 1.0 / 3, 1.0 / 3], [0.5, 0.25, \n 0.25], [0.4, 0.4, 0.2], [0.6, 0.2, 0.2], [0.2, 0.1, 0.7], [0.8, 0.1, \n 0.1], [0.7, 0.15, 0.15], [0.2, 0.5, 0.3], [0.9, 0.1, 0]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1.0 / 3, 1.0 / 3, 1.0 / 3],\n [0.5, 0.25, 0.25], [0.4, 0.4, 0.2], [0.6, 0.2, 0.2], [0.2, 0.1, 0.7], [\n 0.8, 0.1, 0.1], [0.7, 0.15, 0.15], [0.2, 0.5, 0.3], [0.9, 0.1, 0]])\n', (351, 562), False, 'import numpy\n'), ((1023, 1043), 'numpy.full', 'numpy.full', (['(12)', '(53.5)'], {}), '(12, 53.5)\n', (1033, 1043), False, 'import numpy\n'), ((1063, 1084), 'numpy.full', 'numpy.full', (['(12)', '(246.5)'], {}), '(12, 246.5)\n', (1073, 1084), False, 'import numpy\n'), ((1710, 1738), 'numpy.array', 'numpy.array', (['[10]'], {'dtype': 'int'}), '([10], dtype=int)\n', (1721, 1738), False, 'import numpy\n'), ((2342, 2368), 'numpy.array', 'numpy.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (2353, 2368), False, 'import numpy\n'), ((1404, 1437), 'copy.deepcopy', 'copy.deepcopy', (['CYCLONE_ID_STRINGS'], {}), '(CYCLONE_ID_STRINGS)\n', (1417, 1437), False, 'import copy\n'), ((9284, 9299), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9297, 9299), False, 'import unittest\n'), ((1125, 1174), 'gewittergefahr.gg_utils.time_conversion.string_to_unix_sec', 'time_conversion.string_to_unix_sec', (['t', '"""%Y-%m-%d"""'], {}), "(t, '%Y-%m-%d')\n", (1159, 1174), False, 'from gewittergefahr.gg_utils import time_conversion\n'), ((4568, 4730), 'numpy.allclose', 'numpy.allclose', (['first_prediction_dict[prediction_io.PROBABILITY_MATRIX_KEY]', 'second_prediction_dict[prediction_io.PROBABILITY_MATRIX_KEY]'], {'atol': 'TOLERANCE'}), 
'(first_prediction_dict[prediction_io.PROBABILITY_MATRIX_KEY],\n second_prediction_dict[prediction_io.PROBABILITY_MATRIX_KEY], atol=\n TOLERANCE)\n', (4582, 4730), False, 'import numpy\n'), ((6496, 6584), 'ml4tc.io.prediction_io.find_file', 'prediction_io.find_file', ([], {'directory_name': 'DIRECTORY_NAME', 'raise_error_if_missing': '(False)'}), '(directory_name=DIRECTORY_NAME,\n raise_error_if_missing=False)\n', (6519, 6584), False, 'from ml4tc.io import prediction_io\n'), ((6834, 6935), 'ml4tc.io.prediction_io.find_file', 'prediction_io.find_file', ([], {'directory_name': 'DIRECTORY_NAME', 'month': 'MONTH', 'raise_error_if_missing': '(False)'}), '(directory_name=DIRECTORY_NAME, month=MONTH,\n raise_error_if_missing=False)\n', (6857, 6935), False, 'from ml4tc.io import prediction_io\n'), ((7204, 7326), 'ml4tc.io.prediction_io.find_file', 'prediction_io.find_file', ([], {'directory_name': 'DIRECTORY_NAME', 'basin_id_string': 'BASIN_ID_STRING', 'raise_error_if_missing': '(False)'}), '(directory_name=DIRECTORY_NAME, basin_id_string=\n BASIN_ID_STRING, raise_error_if_missing=False)\n', (7227, 7326), False, 'from ml4tc.io import prediction_io\n'), ((7594, 7726), 'ml4tc.io.prediction_io.find_file', 'prediction_io.find_file', ([], {'directory_name': 'DIRECTORY_NAME', 'grid_row': 'GRID_ROW', 'grid_column': 'GRID_COLUMN', 'raise_error_if_missing': '(False)'}), '(directory_name=DIRECTORY_NAME, grid_row=GRID_ROW,\n grid_column=GRID_COLUMN, raise_error_if_missing=False)\n', (7617, 7726), False, 'from ml4tc.io import prediction_io\n'), ((8052, 8106), 'ml4tc.io.prediction_io.file_name_to_metadata', 'prediction_io.file_name_to_metadata', (['FILE_NAME_DEFAULT'], {}), '(FILE_NAME_DEFAULT)\n', (8087, 8106), False, 'from ml4tc.io import prediction_io\n'), ((8396, 8450), 'ml4tc.io.prediction_io.file_name_to_metadata', 'prediction_io.file_name_to_metadata', (['FILE_NAME_MONTHLY'], {}), '(FILE_NAME_MONTHLY)\n', (8431, 8450), False, 'from ml4tc.io import prediction_io\n'), ((8747, 
8808), 'ml4tc.io.prediction_io.file_name_to_metadata', 'prediction_io.file_name_to_metadata', (['FILE_NAME_BASIN_SPECIFIC'], {}), '(FILE_NAME_BASIN_SPECIFIC)\n', (8782, 8808), False, 'from ml4tc.io import prediction_io\n'), ((9105, 9159), 'ml4tc.io.prediction_io.file_name_to_metadata', 'prediction_io.file_name_to_metadata', (['FILE_NAME_SPATIAL'], {}), '(FILE_NAME_SPATIAL)\n', (9140, 9159), False, 'from ml4tc.io import prediction_io\n'), ((4930, 5019), 'numpy.array_equal', 'numpy.array_equal', (['first_prediction_dict[this_key]', 'second_prediction_dict[this_key]'], {}), '(first_prediction_dict[this_key], second_prediction_dict[\n this_key])\n', (4947, 5019), False, 'import numpy\n'), ((5653, 5688), 'copy.deepcopy', 'copy.deepcopy', (['FULL_PREDICTION_DICT'], {}), '(FULL_PREDICTION_DICT)\n', (5666, 5688), False, 'import copy\n'), ((6055, 6090), 'copy.deepcopy', 'copy.deepcopy', (['FULL_PREDICTION_DICT'], {}), '(FULL_PREDICTION_DICT)\n', (6068, 6090), False, 'import copy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import datetime as dt
from scipy import stats
import pymannkendall as mk
from Modules import Read
from Modules.Utils import Listador, FindOutlier, FindOutlierMAD, Cycles
from Modules.Graphs import GraphSerieOutliers, GraphSerieOutliersMAD
from Modules.ENSO import ONIdata, OuliersENSOjust
# Oceanic Nino Index anomalies; ENSO keeps only the months with
# |anomaly| >= 0.5 (El Nino / La Nina conditions).
ONI = ONIdata()
ONI = ONI['Anomalie'].astype(float)
ENSO = ONI[np.where((ONI.values<=-0.5)|(ONI.values>=0.5))[0]]

# Output directory for test results and figures.
Path_out = os.path.abspath(os.path.join(os.path.dirname(__file__), 'Tests'))
def CompareNormStandar(Statistical, significance, tails=1):
    """
    Compare a test statistic against the standard-normal quantile.

    INPUTS
    Statistical : float, value to compare with the standard normal
    significance: level of confidence to accept or reject the test
    tails       : integer in [1, 2], one- or two-tailed test
    OUTPUTS
    test : boolean, True when |Statistical| is below the critical value
           (i.e. the null hypothesis is not rejected)
    """
    quantile = 1 - significance / tails
    critical_value = stats.norm.ppf(quantile, loc=0, scale=1)
    return abs(Statistical) < critical_value
def CompareTdist(Statistical, DegreesFredom, significance, tails=1):
    """
    Compare a test statistic against Student's t distribution.

    INPUTS
    Statistical   : float, value to compare with the t distribution
    DegreesFredom : degrees of freedom of the distribution
    significance  : level of confidence to accept or reject the test
    tails         : integer in [1, 2], one- or two-tailed test
    OUTPUTS
    test : boolean, True when |Statistical| is below the critical value
           (i.e. the null hypothesis is not rejected)
    """
    quantile = 1 - significance / tails
    critical_value = stats.t.ppf(quantile, df=DegreesFredom)
    return abs(Statistical) < critical_value
def SingChange(Serie):
    """
    Count the number of sign changes between consecutive values.
    Zeros are treated as positive.

    INPUTS
    Serie : list or array with the data
    OUTPUTS
    integer number of adjacent pairs with opposite signs
    """
    values = np.asarray(Serie)
    # +1 for values >= 0, -1 for negative values.
    signs = np.where(values < 0, -1, 1)
    return int(np.count_nonzero(signs[:-1] != signs[1:]))
def PeaksValleys(Serie):
    """
    Count triples of consecutive first-difference signs where exactly one
    of the two adjacent pairs changes sign.

    For signs restricted to {+1, -1}, the original xor-of-xors condition
    ``((x ^ y) ^ (y ^ z)) < 0`` holds iff exactly one of (x != y), (y != z)
    is true, which is equivalent to ``x != z``.

    INPUTS
    Serie : list or array with the data
    """
    values = np.asarray(Serie)
    first_diff = values[:-1] - values[1:]
    # +1 where the difference is >= 0, -1 where it is negative.
    signs = np.where(first_diff < 0, -1, 1)
    return int(np.count_nonzero(signs[:-2] != signs[2:]))
def RunsTest(Serie, significance=5E-2):
    """
    Runs (Rachas) test for randomness of a series.

    INPUTS
    Serie        : array with the data (array arithmetic is used, so a
                   plain list will not work here)
    significance : level of significance to accept or reject the null hypothesis
    OUTPUTS
    test : boolean with the acceptance or rejection of the null hypothesis
    """
    series_median = np.median(Serie)
    observed_runs = SingChange(Serie - series_median)

    # Counts of values above/below (>= / <) the median.
    n1 = np.count_nonzero(Serie >= series_median)
    n2 = np.count_nonzero(Serie < series_median)

    expected_runs = (2 * n1 * n2) / (n1 + n2) + 1
    std_dev = np.sqrt(
        (2 * n1 * n2 * (2 * n1 * n2 - n1 - n2))
        / (((n1 + n2) ** 2) * (n1 + n2 - 1))
    )

    z_statistic = (observed_runs - expected_runs) / std_dev
    return CompareNormStandar(z_statistic, significance, tails=2)
def ChangePointTest(Serie, significance=5E-2):
    """
    Change-point (turning point) test for a series.

    INPUTS
    Serie        : list or array with the data
    significance : level of significance to accept or reject the null hypothesis
    OUTPUTS
    test : boolean with the acceptance or rejection of the null hypothesis
    """
    n_values = len(Serie)
    n_turning = PeaksValleys(Serie)
    # Standardize the count against its expectation and variance.
    u_statistic = abs(
        (n_turning - (2.0 / 3.0) * (n_values - 2))
        / np.sqrt((16 * n_values - 29) / 90.)
    )
    return CompareNormStandar(u_statistic, significance, tails=2)
def SpearmanCoefTest(Serie, significance=5E-2):
    """
    Spearman-coefficient test for trend.

    INPUTS
    Serie        : list or array with the data
    significance : level of significance to accept or reject the null hypothesis
    OUTPUTS
    test : boolean with the acceptance or rejection of the null hypothesis
    """
    if isinstance(Serie, list):
        Serie = np.array(Serie)
    n = len(Serie)
    sorted_values = Serie[Serie.argsort()]
    # NOTE(review): this uses differences between values and the sorted
    # values, not rank differences as in the textbook Spearman statistic
    # -- confirm the intent before changing.
    rho = 1 - (6 / (n * ((n ** 2) - 1))) * np.sum((Serie - sorted_values) ** 2)
    u_statistic = abs(rho * np.sqrt(n - 2) / np.sqrt(1 - (rho ** 2)))
    return CompareTdist(u_statistic, DegreesFredom=n - 2,
                        significance=significance, tails=2)
def AndersonTest(Serie, rezagos=None, significance=5E-2):
    """
    Anderson correlogram test of serial independence.

    The lag-k autocorrelation r_k is computed for k = 1..rezagos and the
    series is declared independent (True) when every r_k lies strictly
    inside the confidence band (-1 +/- Z*sqrt(N-k-1)) / (N-k).

    INPUTS
    Serie        : list or array with the data
    rezagos      : number of lags to check (default: N - 2)
    significance : level of significance to accept or reject the null hypothesis
    OUTPUTS
    test : boolean with the acceptance or rejection of the null hypothesis

    NOTE(fix): the original implementation (a) started the loop at lag 0,
    whose autocorrelation is always 1 and therefore always outside the
    band, and (b) compared the sum of a partially *uninitialized*
    ``np.empty`` boolean array against N, so it could never reliably
    return True.  Lags now run from 1 and only the computed lags are
    checked.
    """
    cuantil = 1 - significance / 2
    Z_norm = stats.norm.ppf(cuantil, loc=0, scale=1)

    Serie = np.asarray(Serie, dtype=float)
    N = len(Serie)
    if rezagos is None:
        rezagos = N - 2

    Mean = np.nanmean(Serie)
    # Shared denominator: total sum of squared deviations.
    denominator = np.sum((Serie - Mean) ** 2)

    for k in range(1, rezagos + 1):
        lim_up = (-1 + Z_norm * np.sqrt(N - k - 1)) / (N - k)
        lim_dw = (-1 - Z_norm * np.sqrt(N - k - 1)) / (N - k)
        r_k = np.sum((Serie[:N - k] - Mean) * (Serie[k:] - Mean)) / denominator
        if not (lim_dw < r_k < lim_up):
            return False
    return True
def MannKendall_modified(Serie, rezagos=None, significance=5E-2):
    """
    Modified Mann-Kendall trend test with the Hamed and Rao (1998)
    variance correction.  Returns True when |z| is below the two-tailed
    standard-normal critical value (null hypothesis not rejected).
    """
    result = mk.hamed_rao_modification_test(Serie, alpha=significance, lag=rezagos)
    return CompareNormStandar(result.z, significance, tails=2)
# Directory with the cleaned station files (.csv data + .meta metadata).
# Est_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'CleanData/PPT'))
Est_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'CleanData/QDL'))
Estaciones = Listador(Est_path,final='.csv')

# One row per station: boolean result of each statistical test, and the
# outlier limits found for the monthly-maxima series.
Pruebas = ['Rachas', 'PuntoCambio', 'Spearman', 'Anderson','MannKendall']
Test = pd.DataFrame([], columns=Pruebas)
Outl = pd.DataFrame([], columns=['outlier_inf','outlier_sup'])

for i in range(len(Estaciones)):
    # Station metadata lives in a sibling .meta file with the same stem.
    Meta = pd.read_csv(os.path.join(Est_path, Estaciones[i].split('.')[0]+'.meta'),index_col=0)
    if Meta.iloc[-2].values[0] == u'Nivel máximo diario':
        # Skip daily-maximum level series.
        continue
    Name = Meta.iloc[0].values[0]
    Esta = Estaciones[i].split('.csv')[0]
    if Est_path.endswith('QDL'):
        # Tag daily-discharge series so output names do not collide.
        Meta.iloc[-2].values[0] = u'Caudal'
        Name += 'QDL_'
        Esta += 'QDL'

    Dat = Read.EstacionCSV_pd(Estaciones[i], Name, path=Est_path)
    # if Estaciones[i].endswith('N.csv') == False:
    try:
        # NOTE(review): re-parses the dates swapping day and month;
        # presumably a workaround for files saved with "%Y-%d-%m"
        # ordering -- confirm against the input files.
        Dat.index = [dt.datetime.strptime(fecha.strftime("%Y-%m-%d") , "%Y-%d-%m") for fecha in Dat.index]
    except:
        pass
    Dat = Dat.dropna()

    # dat = Dat.values.ravel()
    # yearly = Dat.groupby(lambda y: y.year).max().values.ravel()
    # Monthly maxima, used for outlier detection and plotting.
    mensual = Dat.groupby(lambda m: (m.year,m.month)).max()
    out_inf, out_sup = FindOutlier(mensual,clean=False,index=False,lims=True, restrict_inf=0)

    # Path_SaveFigure = os.path.join(Path_out,Meta.iloc[-4].values[0])
    GraphSerieOutliers(mensual, out_inf, out_sup,
                       title=Name,
                       label=Meta.iloc[-2].values[0],
                       png=True, pdf=False,
                       name=Estaciones[i].split('.csv')[0],
                       PathFigs=os.path.join(Path_out,Meta.iloc[-4].values[0]))
    if os.path.exists(os.path.join(Path_out,Meta.iloc[-4].values[0])) == False:
        os.makedirs(os.path.join(Path_out,Meta.iloc[-4].values[0]))

    # Remove outliers while keeping values that coincide with ENSO episodes,
    # once with the IQR criterion and once with MAD.
    Serie = OuliersENSOjust(Dat.sort_index(),method='IQR', ENSO=ENSO,
                            write=True, name=Esta+'_OutlierIQR',
                            graph=True, label=Meta.iloc[-2].values[0], title=Name,
                            pdf=False, png=True,
                            Path_Out=os.path.join(Path_out,Meta.iloc[-4].values[0]))
    serie = OuliersENSOjust(Dat.sort_index(),method='MAD', ENSO=ENSO,
                            write=True, name=Esta+'_OutlierMAD',
                            graph=True, label=Meta.iloc[-2].values[0], title=Name,
                            pdf=False, png=True,
                            Path_Out=os.path.join(Path_out,Meta.iloc[-4].values[0]))

    # Annual maxima of the IQR-cleaned series feed the statistical tests.
    yearly = Serie.groupby(lambda y: y.year).max().values.ravel()
    if len(yearly)>3:
        tst = {'Rachas'     :RunsTest(yearly),
               'PuntoCambio':ChangePointTest(yearly),
               'Spearman'   :SpearmanCoefTest(yearly),
               'Anderson'   :AndersonTest(yearly),
               'MannKendall':MannKendall_modified(yearly, rezagos=None),}
        out = {'outlier_inf':out_inf,
               'outlier_sup':out_sup}
        Est = pd.Series(data=tst, name=Name+'Caudal' if Meta.iloc[-4].values[0]=='CAUDAL' else Name+'Nivel')
        Out = pd.Series(data=out, name=Name+'Caudal' if Meta.iloc[-4].values[0]=='CAUDAL' else Name+'Nivel')
        Test = Test.append(Est)
        Outl = Outl.append(Out)

# Suffix distinguishes level (NR) from discharge output files.
if Est_path.endswith('CleanNiveles'):
    sufix = 'NR'
else:
    sufix = ''
Test.to_csv(os.path.join(Path_out,f'Test_{sufix}.csv'), sep=',')
Outl.to_csv(os.path.join(Path_out,f'Outliers_{sufix}.csv'), sep=',')
| [
"pandas.DataFrame",
"scipy.stats.norm.ppf",
"pymannkendall.hamed_rao_modification_test",
"numpy.sum",
"numpy.median",
"Modules.Utils.Listador",
"os.path.dirname",
"Modules.ENSO.ONIdata",
"numpy.where",
"numpy.nanmean",
"Modules.Read.EstacionCSV_pd",
"numpy.array",
"pandas.Series",
"scipy.s... | [((395, 404), 'Modules.ENSO.ONIdata', 'ONIdata', ([], {}), '()\n', (402, 404), False, 'from Modules.ENSO import ONIdata, OuliersENSOjust\n'), ((5833, 5865), 'Modules.Utils.Listador', 'Listador', (['Est_path'], {'final': '""".csv"""'}), "(Est_path, final='.csv')\n", (5841, 5865), False, 'from Modules.Utils import Listador, FindOutlier, FindOutlierMAD, Cycles\n'), ((5948, 5981), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': 'Pruebas'}), '([], columns=Pruebas)\n', (5960, 5981), True, 'import pandas as pd\n'), ((5989, 6045), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {'columns': "['outlier_inf', 'outlier_sup']"}), "([], columns=['outlier_inf', 'outlier_sup'])\n", (6001, 6045), True, 'import pandas as pd\n'), ((1074, 1113), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['cuantil'], {'loc': '(0)', 'scale': '(1)'}), '(cuantil, loc=0, scale=1)\n', (1088, 1113), False, 'from scipy import stats\n'), ((1734, 1772), 'scipy.stats.t.ppf', 'stats.t.ppf', (['cuantil'], {'df': 'DegreesFredom'}), '(cuantil, df=DegreesFredom)\n', (1745, 1772), False, 'from scipy import stats\n'), ((2983, 2999), 'numpy.median', 'np.median', (['Serie'], {}), '(Serie)\n', (2992, 2999), True, 'import numpy as np\n'), ((3185, 3270), 'numpy.sqrt', 'np.sqrt', (['(2 * n1 * n2 * (2 * n1 * n2 - n1 - n2) / ((n1 + n2) ** 2 * (n1 + n2 - 1)))'], {}), '(2 * n1 * n2 * (2 * n1 * n2 - n1 - n2) / ((n1 + n2) ** 2 * (n1 + n2 -\n 1)))\n', (3192, 3270), True, 'import numpy as np\n'), ((4682, 4721), 'scipy.stats.norm.ppf', 'stats.norm.ppf', (['cuantil'], {'loc': '(0)', 'scale': '(1)'}), '(cuantil, loc=0, scale=1)\n', (4696, 4721), False, 'from scipy import stats\n'), ((4797, 4814), 'numpy.nanmean', 'np.nanmean', (['Serie'], {}), '(Serie)\n', (4807, 4814), True, 'import numpy as np\n'), ((5501, 5571), 'pymannkendall.hamed_rao_modification_test', 'mk.hamed_rao_modification_test', (['Serie'], {'alpha': 'significance', 'lag': 'rezagos'}), '(Serie, alpha=significance, lag=rezagos)\n', (5531, 5571), True, 
'import pymannkendall as mk\n'), ((6462, 6517), 'Modules.Read.EstacionCSV_pd', 'Read.EstacionCSV_pd', (['Estaciones[i]', 'Name'], {'path': 'Est_path'}), '(Estaciones[i], Name, path=Est_path)\n', (6481, 6517), False, 'from Modules import Read\n'), ((6916, 6989), 'Modules.Utils.FindOutlier', 'FindOutlier', (['mensual'], {'clean': '(False)', 'index': '(False)', 'lims': '(True)', 'restrict_inf': '(0)'}), '(mensual, clean=False, index=False, lims=True, restrict_inf=0)\n', (6927, 6989), False, 'from Modules.Utils import Listador, FindOutlier, FindOutlierMAD, Cycles\n'), ((9054, 9097), 'os.path.join', 'os.path.join', (['Path_out', 'f"""Test_{sufix}.csv"""'], {}), "(Path_out, f'Test_{sufix}.csv')\n", (9066, 9097), False, 'import os\n'), ((9123, 9170), 'os.path.join', 'os.path.join', (['Path_out', 'f"""Outliers_{sufix}.csv"""'], {}), "(Path_out, f'Outliers_{sufix}.csv')\n", (9135, 9170), False, 'import os\n'), ((452, 504), 'numpy.where', 'np.where', (['((ONI.values <= -0.5) | (ONI.values >= 0.5))'], {}), '((ONI.values <= -0.5) | (ONI.values >= 0.5))\n', (460, 504), True, 'import numpy as np\n'), ((545, 570), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (560, 570), False, 'import os\n'), ((2009, 2024), 'numpy.array', 'np.array', (['Serie'], {}), '(Serie)\n', (2017, 2024), True, 'import numpy as np\n'), ((2444, 2459), 'numpy.array', 'np.array', (['Serie'], {}), '(Serie)\n', (2452, 2459), True, 'import numpy as np\n'), ((4246, 4261), 'numpy.array', 'np.array', (['Serie'], {}), '(Serie)\n', (4254, 4261), True, 'import numpy as np\n'), ((5773, 5798), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5788, 5798), False, 'import os\n'), ((8697, 8801), 'pandas.Series', 'pd.Series', ([], {'data': 'tst', 'name': "(Name + 'Caudal' if Meta.iloc[-4].values[0] == 'CAUDAL' else Name + 'Nivel')"}), "(data=tst, name=Name + 'Caudal' if Meta.iloc[-4].values[0] ==\n 'CAUDAL' else Name + 'Nivel')\n", (8706, 8801), True, 'import pandas as 
pd\n'), ((8806, 8910), 'pandas.Series', 'pd.Series', ([], {'data': 'out', 'name': "(Name + 'Caudal' if Meta.iloc[-4].values[0] == 'CAUDAL' else Name + 'Nivel')"}), "(data=out, name=Name + 'Caudal' if Meta.iloc[-4].values[0] ==\n 'CAUDAL' else Name + 'Nivel')\n", (8815, 8910), True, 'import pandas as pd\n'), ((2080, 2095), 'numpy.array', 'np.array', (['Serie'], {}), '(Serie)\n', (2088, 2095), True, 'import numpy as np\n'), ((2547, 2561), 'numpy.array', 'np.array', (['diff'], {}), '(diff)\n', (2555, 2561), True, 'import numpy as np\n'), ((3777, 3806), 'numpy.sqrt', 'np.sqrt', (['((16 * N - 29) / 90.0)'], {}), '((16 * N - 29) / 90.0)\n', (3784, 3806), True, 'import numpy as np\n'), ((4344, 4368), 'numpy.sum', 'np.sum', (['((Serie - S) ** 2)'], {}), '((Serie - S) ** 2)\n', (4350, 4368), True, 'import numpy as np\n'), ((4394, 4413), 'numpy.sqrt', 'np.sqrt', (['(1 - R ** 2)'], {}), '(1 - R ** 2)\n', (4401, 4413), True, 'import numpy as np\n'), ((5048, 5099), 'numpy.sum', 'np.sum', (['((Serie[:N - k] - Mean) * (Serie[k:] - Mean))'], {}), '((Serie[:N - k] - Mean) * (Serie[k:] - Mean))\n', (5054, 5099), True, 'import numpy as np\n'), ((5092, 5119), 'numpy.sum', 'np.sum', (['((Serie - Mean) ** 2)'], {}), '((Serie - Mean) ** 2)\n', (5098, 5119), True, 'import numpy as np\n'), ((7334, 7381), 'os.path.join', 'os.path.join', (['Path_out', 'Meta.iloc[-4].values[0]'], {}), '(Path_out, Meta.iloc[-4].values[0])\n', (7346, 7381), False, 'import os\n'), ((7405, 7452), 'os.path.join', 'os.path.join', (['Path_out', 'Meta.iloc[-4].values[0]'], {}), '(Path_out, Meta.iloc[-4].values[0])\n', (7417, 7452), False, 'import os\n'), ((7483, 7530), 'os.path.join', 'os.path.join', (['Path_out', 'Meta.iloc[-4].values[0]'], {}), '(Path_out, Meta.iloc[-4].values[0])\n', (7495, 7530), False, 'import os\n'), ((7835, 7882), 'os.path.join', 'os.path.join', (['Path_out', 'Meta.iloc[-4].values[0]'], {}), '(Path_out, Meta.iloc[-4].values[0])\n', (7847, 7882), False, 'import os\n'), ((8187, 8234), 
'os.path.join', 'os.path.join', (['Path_out', 'Meta.iloc[-4].values[0]'], {}), '(Path_out, Meta.iloc[-4].values[0])\n', (8199, 8234), False, 'import os\n'), ((3047, 3074), 'numpy.where', 'np.where', (['(Serie >= S_median)'], {}), '(Serie >= S_median)\n', (3055, 3074), True, 'import numpy as np\n'), ((3094, 3120), 'numpy.where', 'np.where', (['(Serie < S_median)'], {}), '(Serie < S_median)\n', (3102, 3120), True, 'import numpy as np\n'), ((4381, 4395), 'numpy.sqrt', 'np.sqrt', (['(n - 2)'], {}), '(n - 2)\n', (4388, 4395), True, 'import numpy as np\n'), ((4958, 4976), 'numpy.sqrt', 'np.sqrt', (['(N - k - 1)'], {}), '(N - k - 1)\n', (4965, 4976), True, 'import numpy as np\n'), ((5010, 5028), 'numpy.sqrt', 'np.sqrt', (['(N - k - 1)'], {}), '(N - k - 1)\n', (5017, 5028), True, 'import numpy as np\n')] |
import numpy as np
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions
from psyneulink.core.components.functions.statefulfunctions.integratorfunctions import AccumulatorIntegrator
from psyneulink.core.components.functions.transferfunctions import Logistic
from psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism import GatingMechanism
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
from psyneulink.core.globals.keywords import \
DEFAULT_VARIABLE, FUNCTION, FUNCTION_PARAMS, INITIALIZER, RATE, TARGET_MECHANISM, VALUE
from psyneulink.core.compositions.composition import Composition
def test_gating_with_composition():
    """Tests same configuration as control of InputPort in tests/mechansims/test_identicalness_of_control_and_gating
    """
    # Feed-forward logistic network: 2 -> 5 -> 4 -> 3.
    Input_Layer = TransferMechanism(name='Input Layer', function=Logistic, size=2)
    Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', function=Logistic, size=5)
    Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', function=Logistic, size=4)
    Output_Layer = TransferMechanism(name='Output Layer', function=Logistic, size=3)

    # Single gating signal modulating both hidden layers and the output layer.
    Gating_Mechanism = GatingMechanism(size=[1], gate=[Hidden_Layer_1, Hidden_Layer_2, Output_Layer])

    # Deterministic weight matrices so the expected results are reproducible.
    Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5)
    Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4)
    Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3)

    # This projection is specified in add_backpropagation_learning_pathway method below
    Input_Weights = MappingProjection(name='Input Weights',matrix=Input_Weights_matrix)

    # This projection is "discovered" by add_backpropagation_learning_pathway method below
    Middle_Weights = MappingProjection(name='Middle Weights',sender=Hidden_Layer_1,receiver=Hidden_Layer_2,
                                       matrix={
                                           VALUE: Middle_Weights_matrix,
                                           FUNCTION: AccumulatorIntegrator,
                                           FUNCTION_PARAMS: {
                                               DEFAULT_VARIABLE: Middle_Weights_matrix,
                                               INITIALIZER: Middle_Weights_matrix,
                                               RATE: Middle_Weights_matrix
                                           },
                                       }
                                       )
    Output_Weights = MappingProjection(sender=Hidden_Layer_2, receiver=Output_Layer, matrix=Output_Weights_matrix)

    pathway = [Input_Layer, Input_Weights, Hidden_Layer_1, Hidden_Layer_2, Output_Layer]
    comp = Composition()
    backprop_pathway = comp.add_backpropagation_learning_pathway(pathway=pathway,
                                                                 loss_function=None)
    # c.add_linear_processing_pathway(pathway=z)
    comp.add_node(Gating_Mechanism)

    stim_list = {
        Input_Layer: [[-1, 30]],
        Gating_Mechanism: [1.0],
        backprop_pathway.target: [[0, 0, 1]]}

    # Train for three trials with gating signal 1.0.
    comp.learn(num_trials=3, inputs=stim_list)
    expected_results = [[[0.81493513, 0.85129046, 0.88154205]],
                        [[0.81331773, 0.85008207, 0.88157851]],
                        [[0.81168332, 0.84886047, 0.88161468]]]
    assert np.allclose(comp.results, expected_results)

    # Gating signal 0.0: outputs collapse to logistic(0) = 0.5.
    stim_list[Gating_Mechanism]=[0.0]
    results = comp.learn(num_trials=1, inputs=stim_list)
    expected_results = [[[0.5, 0.5, 0.5]]]
    assert np.allclose(results, expected_results)

    # Gating signal 2.0.
    stim_list[Gating_Mechanism]=[2.0]
    results = comp.learn(num_trials=1, inputs=stim_list)
    expected_results = [[0.96941429, 0.9837254 , 0.99217549]]
    assert np.allclose(results, expected_results)
def test_gating_with_UDF_with_composition():
    # User-defined linear function whose slope (m) and intercept (b) are
    # declared as modulable multiplicative/additive parameters.
    def my_linear_fct(
        x,
        m=2.0,
        b=0.0,
        params={
            pnl.ADDITIVE_PARAM: 'b',
            pnl.MULTIPLICATIVE_PARAM: 'm'
        }
    ):
        return m * x + b

    # Plain linear function without modulable-parameter metadata.
    def my_simple_linear_fct(
        x,
        m=1.0,
        b=0.0
    ):
        return m * x + b

    def my_exp_fct(
        x,
        r=1.0,
        # b=pnl.CONTROL,
        b=0.0,
        params={
            pnl.ADDITIVE_PARAM: 'b',
            pnl.MULTIPLICATIVE_PARAM: 'r'
        }
    ):
        return x**r + b

    def my_sinusoidal_fct(
        input,
        phase=0,
        amplitude=1,
        params={
            pnl.ADDITIVE_PARAM: 'phase',
            pnl.MULTIPLICATIVE_PARAM: 'amplitude'
        }
    ):
        frequency = input[0]
        t = input[1]
        return amplitude * np.sin(2 * np.pi * frequency * t + phase)

    Input_Layer = pnl.TransferMechanism(
        name='Input_Layer',
        default_variable=np.zeros((2,)),
        function=psyneulink.core.components.functions.transferfunctions.Logistic
    )
    # The output port's Linear slope is set to pnl.GATING, so the gating
    # signal scales the port's output directly.
    Output_Layer = pnl.TransferMechanism(
        name='Output_Layer',
        default_variable=[0, 0, 0],
        function=psyneulink.core.components.functions.transferfunctions.Linear,
        # function=pnl.Logistic,
        # output_ports={pnl.NAME: 'RESULTS USING UDF',
        #               pnl.VARIABLE: [(pnl.OWNER_VALUE,0), pnl.TIME_STEP],
        #               pnl.FUNCTION: my_sinusoidal_fct}
        output_ports={
            pnl.NAME: 'RESULTS USING UDF',
            # pnl.VARIABLE: (pnl.OWNER_VALUE, 0),
            pnl.FUNCTION: psyneulink.core.components.functions.transferfunctions.Linear(slope=pnl.GATING)
        }
    )
    Gating_Mechanism = pnl.GatingMechanism(
        size=[1],
        gating_signals=[
            # Output_Layer
            Output_Layer.output_port,
        ]
    )
    comp = Composition()
    comp.add_linear_processing_pathway(pathway=[Input_Layer, Output_Layer])
    comp.add_node(Gating_Mechanism)

    stim_list = {
        Input_Layer: [[-1, 30], [-1, 30], [-1, 30], [-1, 30]],
        Gating_Mechanism: [[0.0], [0.5], [1.0], [2.0]]
    }
    comp.run(num_trials=4, inputs=stim_list)

    # Results scale linearly with the gating signal (0, 0.5, 1, 2).
    expected_results = [
        [np.array([0., 0., 0.])],
        [np.array([0.63447071, 0.63447071, 0.63447071])],
        [np.array([1.26894142, 1.26894142, 1.26894142])],
        [np.array([2.53788284, 2.53788284, 2.53788284])]
    ]
    np.testing.assert_allclose(comp.results, expected_results)
| [
"psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism.GatingMechanism",
"psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism",
"numpy.allclose",
"numpy.zeros",
"psyneulink.GatingMechanism",
"numpy.sin",
"numpy.array",
"psyneulink.core.compone... | [((976, 1040), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""Input Layer"""', 'function': 'Logistic', 'size': '(2)'}), "(name='Input Layer', function=Logistic, size=2)\n", (993, 1040), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1062, 1129), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""Hidden Layer_1"""', 'function': 'Logistic', 'size': '(5)'}), "(name='Hidden Layer_1', function=Logistic, size=5)\n", (1079, 1129), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1151, 1218), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""Hidden Layer_2"""', 'function': 'Logistic', 'size': '(4)'}), "(name='Hidden Layer_2', function=Logistic, size=4)\n", (1168, 1218), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1238, 1303), 'psyneulink.core.components.mechanisms.processing.transfermechanism.TransferMechanism', 'TransferMechanism', ([], {'name': '"""Output Layer"""', 'function': 'Logistic', 'size': '(3)'}), "(name='Output Layer', function=Logistic, size=3)\n", (1255, 1303), False, 'from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n'), ((1328, 1406), 'psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism.GatingMechanism', 'GatingMechanism', ([], {'size': '[1]', 'gate': '[Hidden_Layer_1, Hidden_Layer_2, Output_Layer]'}), '(size=[1], gate=[Hidden_Layer_1, Hidden_Layer_2, Output_Layer])\n', (1343, 1406), False, 'from psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism import GatingMechanism\n'), ((1746, 1814), 
'psyneulink.core.components.projections.pathway.mappingprojection.MappingProjection', 'MappingProjection', ([], {'name': '"""Input Weights"""', 'matrix': 'Input_Weights_matrix'}), "(name='Input Weights', matrix=Input_Weights_matrix)\n", (1763, 1814), False, 'from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection\n'), ((1926, 2228), 'psyneulink.core.components.projections.pathway.mappingprojection.MappingProjection', 'MappingProjection', ([], {'name': '"""Middle Weights"""', 'sender': 'Hidden_Layer_1', 'receiver': 'Hidden_Layer_2', 'matrix': '{VALUE: Middle_Weights_matrix, FUNCTION: AccumulatorIntegrator,\n FUNCTION_PARAMS: {DEFAULT_VARIABLE: Middle_Weights_matrix, INITIALIZER:\n Middle_Weights_matrix, RATE: Middle_Weights_matrix}}'}), "(name='Middle Weights', sender=Hidden_Layer_1, receiver=\n Hidden_Layer_2, matrix={VALUE: Middle_Weights_matrix, FUNCTION:\n AccumulatorIntegrator, FUNCTION_PARAMS: {DEFAULT_VARIABLE:\n Middle_Weights_matrix, INITIALIZER: Middle_Weights_matrix, RATE:\n Middle_Weights_matrix}})\n", (1943, 2228), False, 'from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection\n'), ((2353, 2451), 'psyneulink.core.components.projections.pathway.mappingprojection.MappingProjection', 'MappingProjection', ([], {'sender': 'Hidden_Layer_2', 'receiver': 'Output_Layer', 'matrix': 'Output_Weights_matrix'}), '(sender=Hidden_Layer_2, receiver=Output_Layer, matrix=\n Output_Weights_matrix)\n', (2370, 2451), False, 'from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection\n'), ((2548, 2561), 'psyneulink.core.compositions.composition.Composition', 'Composition', ([], {}), '()\n', (2559, 2561), False, 'from psyneulink.core.compositions.composition import Composition\n'), ((3198, 3241), 'numpy.allclose', 'np.allclose', (['comp.results', 'expected_results'], {}), '(comp.results, expected_results)\n', (3209, 3241), True, 'import numpy as np\n'), ((3392, 
3430), 'numpy.allclose', 'np.allclose', (['results', 'expected_results'], {}), '(results, expected_results)\n', (3403, 3430), True, 'import numpy as np\n'), ((3600, 3638), 'numpy.allclose', 'np.allclose', (['results', 'expected_results'], {}), '(results, expected_results)\n', (3611, 3638), True, 'import numpy as np\n'), ((5410, 5482), 'psyneulink.GatingMechanism', 'pnl.GatingMechanism', ([], {'size': '[1]', 'gating_signals': '[Output_Layer.output_port]'}), '(size=[1], gating_signals=[Output_Layer.output_port])\n', (5429, 5482), True, 'import psyneulink as pnl\n'), ((5567, 5580), 'psyneulink.core.compositions.composition.Composition', 'Composition', ([], {}), '()\n', (5578, 5580), False, 'from psyneulink.core.compositions.composition import Composition\n'), ((6126, 6184), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['comp.results', 'expected_results'], {}), '(comp.results, expected_results)\n', (6152, 6184), True, 'import numpy as np\n'), ((4497, 4538), 'numpy.sin', 'np.sin', (['(2 * np.pi * frequency * t + phase)'], {}), '(2 * np.pi * frequency * t + phase)\n', (4503, 4538), True, 'import numpy as np\n'), ((4634, 4648), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (4642, 4648), True, 'import numpy as np\n'), ((5917, 5942), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5925, 5942), True, 'import numpy as np\n'), ((5951, 5997), 'numpy.array', 'np.array', (['[0.63447071, 0.63447071, 0.63447071]'], {}), '([0.63447071, 0.63447071, 0.63447071])\n', (5959, 5997), True, 'import numpy as np\n'), ((6009, 6055), 'numpy.array', 'np.array', (['[1.26894142, 1.26894142, 1.26894142]'], {}), '([1.26894142, 1.26894142, 1.26894142])\n', (6017, 6055), True, 'import numpy as np\n'), ((6067, 6113), 'numpy.array', 'np.array', (['[2.53788284, 2.53788284, 2.53788284]'], {}), '([2.53788284, 2.53788284, 2.53788284])\n', (6075, 6113), True, 'import numpy as np\n'), ((1436, 1452), 'numpy.arange', 'np.arange', (['(2 * 5)'], {}), '(2 
* 5)\n', (1445, 1452), True, 'import numpy as np\n'), ((1513, 1529), 'numpy.arange', 'np.arange', (['(5 * 4)'], {}), '(5 * 4)\n', (1522, 1529), True, 'import numpy as np\n'), ((1590, 1606), 'numpy.arange', 'np.arange', (['(4 * 3)'], {}), '(4 * 3)\n', (1599, 1606), True, 'import numpy as np\n')] |
# Author: <NAME> <<EMAIL>>
# License: BSD
import numpy as np
from scipy import sparse
from ..graph import graph_laplacian
def test_graph_laplacian():
    """Sanity checks for graph_laplacian on several dense matrices.

    For every test matrix the Laplacian must be symmetric, must match the
    Laplacian computed from the CSR (sparse) version of the same matrix,
    and -- in the unnormalized case -- must have zero column sums.
    """
    test_matrices = [
        np.arange(10) * np.arange(10)[:, np.newaxis],
        np.ones((7, 7)),
        np.eye(19),
        np.vander(np.arange(4)) + np.vander(np.arange(4)).T,
    ]
    for dense in test_matrices:
        as_csr = sparse.csr_matrix(dense)
        for use_norm in (True, False):
            lap = graph_laplacian(dense, normed=use_norm)
            if not use_norm:
                # Unnormalized Laplacian columns sum to zero.
                np.testing.assert_array_almost_equal(
                    lap.sum(axis=0), np.zeros(dense.shape[0]))
            # The Laplacian must be symmetric.
            np.testing.assert_array_almost_equal(lap.T, lap)
            # Dense and sparse inputs must give the same result.
            np.testing.assert_array_almost_equal(
                lap, graph_laplacian(as_csr, normed=use_norm).todense())
| [
"numpy.zeros",
"numpy.ones",
"scipy.sparse.csr_matrix",
"numpy.arange",
"numpy.eye",
"numpy.testing.assert_array_almost_equal"
] | [((232, 247), 'numpy.ones', 'np.ones', (['(7, 7)'], {}), '((7, 7))\n', (239, 247), True, 'import numpy as np\n'), ((265, 275), 'numpy.eye', 'np.eye', (['(19)'], {}), '(19)\n', (271, 275), True, 'import numpy as np\n'), ((381, 403), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['mat'], {}), '(mat)\n', (398, 403), False, 'from scipy import sparse\n'), ((170, 183), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (179, 183), True, 'import numpy as np\n'), ((714, 774), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['laplacian.T', 'laplacian'], {}), '(laplacian.T, laplacian)\n', (750, 774), True, 'import numpy as np\n'), ((186, 199), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (195, 199), True, 'import numpy as np\n'), ((303, 315), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (312, 315), True, 'import numpy as np\n'), ((329, 341), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (338, 341), True, 'import numpy as np\n'), ((683, 700), 'numpy.zeros', 'np.zeros', (['n_nodes'], {}), '(n_nodes)\n', (691, 700), True, 'import numpy as np\n')] |
"""
Script that trains Tensorflow Progressive Multitask models on UV datasets.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import tempfile
import shutil
import numpy as np
import deepchem as dc
from MERCK_datasets import load_uv
# Set numpy seed
np.random.seed(123)
###Load data###
shard_size = 2000
num_shards_per_batch = 4
print("About to load MERCK data.")
UV_tasks, datasets, transformers = load_uv(
shard_size=shard_size, num_shards_per_batch=num_shards_per_batch)
train_dataset, valid_dataset, test_dataset = datasets
print("Number of compounds in train set")
print(len(train_dataset))
print("Number of compounds in validation set")
print(len(valid_dataset))
print("Number of compounds in test set")
print(len(test_dataset))
###Create model###
n_layers = 3
nb_epoch = 50
model = dc.models.ProgressiveMultitaskRegressor(
len(UV_tasks), train_dataset.get_data_shape()[0],
layer_sizes=[25]*n_layers, dropouts=[.25]*n_layers,
alpha_init_stddevs=[.02]*n_layers, weight_init_stddevs=[.02]*n_layers,
bias_init_consts=[1.]*n_layers, learning_rate=.0003,
penalty=.0001, penalty_type="l2", optimizer="adam", batch_size=100,
seed=123, verbosity="high")
#Use R2 classification metric
metric = dc.metrics.Metric(dc.metrics.pearson_r2_score, task_averager=np.mean)
print("Training model")
model.fit(train_dataset, nb_epoch=nb_epoch)
#model.old_fit(train_dataset, nb_epoch=nb_epoch)
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
#Only use for final evaluation
test_scores = model.evaluate(test_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
print("Test scores")
print(test_scores)
| [
"MERCK_datasets.load_uv",
"numpy.random.seed",
"deepchem.metrics.Metric"
] | [((329, 348), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (343, 348), True, 'import numpy as np\n'), ((479, 552), 'MERCK_datasets.load_uv', 'load_uv', ([], {'shard_size': 'shard_size', 'num_shards_per_batch': 'num_shards_per_batch'}), '(shard_size=shard_size, num_shards_per_batch=num_shards_per_batch)\n', (486, 552), False, 'from MERCK_datasets import load_uv\n'), ((1303, 1372), 'deepchem.metrics.Metric', 'dc.metrics.Metric', (['dc.metrics.pearson_r2_score'], {'task_averager': 'np.mean'}), '(dc.metrics.pearson_r2_score, task_averager=np.mean)\n', (1320, 1372), True, 'import deepchem as dc\n')] |
import os
import copy
from enum import Enum
import mujoco_py
import numpy as np
from gym.utils import EzPickle, transformations as tf
from gym.envs.robotics.robot_env import RobotEnv
from gym.envs.robotics.utils import reset_mocap2body_xpos, reset_mocap_welds
def _check_range(a, a_min, a_max, include_bounds=True):
if include_bounds:
return np.all((a_min <= a) & (a <= a_max))
else:
return np.all((a_min < a) & (a < a_max))
def _ctrl_set_action(sim, action):
    """Write `action` into the simulation's actuator controls, one entry per actuator."""
    # Originally from gym.envs.robotics.utils
    if sim.data.ctrl is not None:
        for i in range(action.shape[0]):
            if sim.model.actuator_biastype[i] == 0:
                # No bias: write the action directly as the control value.
                sim.data.ctrl[i] = action[i]
            else:
                # Biased (position-style) actuator: interpret the action as a
                # delta on the current position of the actuated joint.
                idx = sim.model.jnt_qposadr[sim.model.actuator_trnid[i, 0]]
                sim.data.ctrl[i] = sim.data.qpos[idx] + action[i]
def _mocap_set_action(sim, action):
    """Displace the mocap bodies by the deltas in `action`.

    `action` has one row per mocap body: columns 0-2 are a position delta,
    columns 3-6 a quaternion delta.
    """
    # Originally from gym.envs.robotics.utils
    if sim.model.nmocap > 0:
        pos_delta = action[:, :3]
        quat_delta = action[:, 3:]
        # Snap mocap bodies onto their welded bodies before applying deltas.
        reset_mocap2body_xpos(sim)
        sim.data.mocap_pos[:] += pos_delta
        sim.data.mocap_quat[:] += quat_delta
# Geometry specs for the graspable objects used by the PICK_AND_PLACE_OBJECT
# task, keyed by `object_id`. Values become MuJoCo geom attributes spliced
# into the object XML.
OBJECTS = dict(
    egg=dict(type='ellipsoid', size='0.03 0.03 0.04'),
    small_box=dict(type='box', size='0.022 0.022 0.022'),
    box=dict(type='box', size='0.03 0.03 0.03'),
    # sphere=dict(type='ellipsoid', size='0.028 0.028 0.028'),
    # small_sphere=dict(type='ellipsoid', size='0.024 0.024 0.024'),
    # fetch_box=dict(type='box', size='0.025 0.025 0.025', mass=2.0),
    fetch_box=dict(type='box', size='0.025 0.025 0.025', mass=2.0),
    fetch_sphere=dict(type='sphere', size='0.025 0.025 0.025', mass=2.0, friction="1 0.001 0.0001"),
)
class YumiTask(Enum):
    """Tasks supported by the YuMi environments.

    The integer values are fixed and must not change: they identify
    tasks across the pick-and-place, reach and lift environment variants.
    """
    REACH = 1                  # move gripper(s) to target position(s)
    PICK_AND_PLACE_BAR = 2     # two-handed bar pick-and-place
    LIFT_ABOVE_TABLE = 3       # lift the object above the table
    PICK_AND_PLACE_OBJECT = 4  # single-object pick-and-place
class YumiEnv(RobotEnv):
    """Goal-based MuJoCo environment for the ABB YuMi dual-arm robot.

    Supports the tasks in `YumiTask`, with one or both arms, optional
    gripper control (`block_gripper=False` adds a gripper action per arm),
    and optional scene elements (rotating platform, button, object box).
    The scene XML is assembled at construction time from the task and the
    selected options.
    """

    def __init__(self, *, arm, block_gripper, reward_type, task: YumiTask, distance_threshold=0.05,
                 ignore_target_rotation=True, randomize_initial_object_pos=False, object_id=None, object_on_table=False,
                 has_rotating_platform=False, has_button=False, extended_bounds=False, has_object_box=False):
        if arm not in ['right', 'left', 'both']:
            raise ValueError
        self.arm = arm
        if reward_type not in ['sparse', 'dense']:
            raise ValueError
        self.reward_type = reward_type
        self.task = task
        if task == YumiTask.LIFT_ABOVE_TABLE:
            # The lift task is only implemented for two arms with dense reward.
            if reward_type == 'sparse':
                raise NotImplementedError
            if arm != 'both':
                raise NotImplementedError
        self.object_on_table = object_on_table
        self.block_gripper = block_gripper
        self.distance_threshold = distance_threshold
        self.ignore_target_rotation = ignore_target_rotation
        self.randomize_initial_object_pos = randomize_initial_object_pos
        self.has_rotating_platform = has_rotating_platform
        self.has_button = has_button
        self.has_object_box = has_object_box
        # (low, high) bounds used for sampling goals and object positions.
        self._table_safe_bounds = (np.r_[-0.20, -0.43], np.r_[0.35, 0.43])
        self._target_bounds_l = (np.r_[-0.20, 0.07, 0.05], np.r_[0.35, 0.43, 0.6])
        self._target_bounds_r = (np.r_[-0.20, -0.43, 0.05], np.r_[0.35, -0.07, 0.6])
        self._obj_target_bounds = (np.r_[-0.12, -0.12, 0.05], np.r_[0.12, 0.12, 0.25])
        self._obj_init_bounds = (np.r_[-0.12, -0.12], np.r_[0.12, 0.12])
        if task == YumiTask.LIFT_ABOVE_TABLE:
            self._obj_init_bounds = (np.r_[-0.05, -0.05], np.r_[0.05, 0.05])
        if extended_bounds:
            self._obj_target_bounds = (np.r_[-0.24, -0.28, 0.05], np.r_[0.18, 0.28, 0.25])
            self._obj_init_bounds = (np.r_[-0.25, -0.21], np.r_[0.12, 0.21])
        self._button_pressed = False
        self._object_xy_pos_to_sync = None
        self._gripper_r_joint_idx = None
        self._gripper_l_joint_idx = None
        self._arm_r_joint_idx = None
        self._arm_l_joint_idx = None
        self._object_z_offset = 0.0
        self._gripper_joint_max = 0.02
        # 7 arm joints per arm, plus one gripper command per arm if enabled.
        n_actions = 7
        if not block_gripper:
            n_actions += 1
        if arm == 'both':
            n_actions *= 2
        if self.task == YumiTask.PICK_AND_PLACE_BAR:
            object_xml = """
            <body name="object0" pos="0.025 0.025 0.025">
                <joint name="object0:joint" type="free" damping="0.01"/>
                <geom size="0.005 0.150 0.025" type="box" condim="4" name="object0" material="block_mat" mass="0.2" friction="1 0.95 0.01" solimp="0.99 0.99 0.01" solref="0.01 1"/>
                <geom size="0.025 0.150 0.005" pos="0 0 -0.02" type="box" condim="4" name="object0_base" material="block_mat" mass="1.8" solimp="0.99 0.99 0.01" solref="0.01 1"/>
                <site name="object0:center" pos="0 0 0" size="0.02 0.02 0.02" rgba="0 0 1 1" type="sphere"/>
                <site name="object0:left" pos="0 0.125 0" size="0.02 0.02 0.02" rgba="0 1 0 1" type="sphere"/>
                <site name="object0:right" pos="0 -0.125 0" size="0.02 0.02 0.02" rgba="1 0 0 1" type="sphere"/>
            </body>
            <body name="target0" pos="1 0.87 0.2">
                <geom size="0.005 0.150 0.025" type="box" name="target0" material="block_mat_target" contype="0" conaffinity="0"/>
                <geom size="0.025 0.150 0.005" pos="0 0 -0.02" type="box" name="target0_base" material="block_mat_target" contype="0" conaffinity="0"/>
                <site name="target0:center" pos="0 0 0" size="0.02 0.02 0.02" rgba="0 0 1 0.5" type="sphere"/>
                <site name="target0:left" pos="0 0.125 0" size="0.02 0.02 0.02" rgba="0 1 0 0.5" type="sphere"/>
                <site name="target0:right" pos="0 -0.125 0" size="0.02 0.02 0.02" rgba="1 0 0 0.5" type="sphere"/>
            </body>
            """
        elif self.task == YumiTask.LIFT_ABOVE_TABLE:
            object_xml = """
            <body name="object0" pos="0.025 0.025 0.025">
                <joint name="object0:joint" type="free" damping="0.01"/>
                <geom size="0.120 0.025" type="cylinder" condim="4" name="object0_base" material="block_mat" mass="1.0" solimp="0.99 0.99 0.01" solref="0.01 1"/>
                <site name="object0:center" pos="0 0 0" size="0.02 0.02 0.02" rgba="0 0 1 1" type="sphere"/>
            </body>
            """
        elif self.task == YumiTask.PICK_AND_PLACE_OBJECT:
            object_id = object_id or 'egg'
            obj = dict(OBJECTS[object_id])
            if 'mass' not in obj.keys():
                obj['mass'] = 0.2
            # Render the geom attributes as an XML attribute string.
            props = " ".join([f'{k}="{v}"' for k, v in obj.items()])
            object_xml = f"""
            <body name="object0" pos="0.025 0.025 0.025">
                <joint name="object0:joint" type="free" damping="0.01"/>
                <geom {props} condim="4" name="object0_base" material="block_mat" solimp="0.99 0.99 0.01" solref="0.01 1"/>
                <site name="object0:center" pos="0 0 0" size="0.02 0.02 0.02" rgba="0 0 1 1" type="sphere"/>
            </body>
            <body name="target0" pos="1 0.87 0.2">
                <geom {props} name="target0" material="block_mat_target" contype="0" conaffinity="0"/>
                <site name="target0:center" pos="0 0 0" size="0.02 0.02 0.02" rgba="0 0 1 0.5" type="sphere"/>
            </body>
            """
        else:
            object_xml = ""
        rot_platform_xml = ""
        if has_rotating_platform:
            rot_platform_xml = """
            <body name="rotating_platform" pos="0.1 -0.2 0.02">
                <inertial pos="0 0 0" mass="2" diaginertia="0.1 0.1 0.1" />
                <joint type="hinge" name="rotating_platform_joint" damping="0.8" axis="0 0 1" limited="false"/>
                <geom pos="0 0 0" rgba="0 0.5 0 1" size="0.3 0.05 0.01" type="box" friction="1 0.95 0.01"/>
                <geom pos="0.29 0 0.02" rgba="0.5 0 0 1" size="0.01 0.05 0.005" type="box" friction="1 0.95 0.01"/>
                <geom pos="0.24 0.04 0.02" rgba="1 0 0 1" size="0.05 0.01 0.005" type="box" friction="1 0.95 0.01"/>
                <geom pos="0.24 -0.04 0.02" rgba="0 0 1 1" size="0.05 0.01 0.005" type="box" friction="1 0.95 0.01"/>
                <site name="rotating_platform:far_end" pos="0.25 0 0"
                      size="0.02 0.02 0.02" rgba="0 0 1 0.5" type="sphere"/>
            </body>
            """
        button_xml = ""
        if has_button:
            button_xml = """
            <body name="button" pos="-0.15 0 0.02">
                <inertial pos="0 0 0" mass="2" diaginertia="0.1 0.1 0.1" />
                <geom pos="0 0 0" rgba="0 0.5 0 1" size="0.05 0.05 0.01" type="box"/>
                <geom name="button_geom" pos="0 0 0.01" rgba="1 0 0 1" size="0.02 0.02 0.01" type="box"/>
            </body>
            """
        object_box_xml = ""
        if has_object_box:
            object_box_xml = """
            <body name="object_box" pos="0 0 0.06">
                <geom pos="0 0.07 0" rgba="0 0.5 0 1" size="0.1 0.005 0.05" type="box" solimp="0.99 0.99 0.01" solref="0.01 1"/>
                <geom pos="0 -0.07 0" rgba="1 0 0 1" size="0.1 0.005 0.05" type="box" solimp="0.99 0.99 0.01" solref="0.01 1"/>
            </body>
            """
        # The XML fragments above are spliced into the arm-specific model file.
        model_path = os.path.join(os.path.dirname(__file__), 'assets', f'yumi_{arm}.xml')
        xml_format = dict(object=object_xml, rotating_platform=rot_platform_xml, button=button_xml, object_box=object_box_xml)
        super(YumiEnv, self).__init__(model_path=model_path, n_substeps=5,
                                      n_actions=n_actions, initial_qpos=None, xml_format=xml_format)

    @property
    def has_object(self):
        # Every task except plain reaching involves a manipulable object.
        return self.task != YumiTask.REACH

    def mocap_control(self, action):
        """Step the simulation once with mocap (weld) control enabled."""
        reset_mocap2body_xpos(self.sim)
        self.sim.model.eq_active[:] = 1
        _mocap_set_action(self.sim, action)
        self.sim.step()
        self.sim.model.eq_active[:] = 0

    def mocap_ik(self, pose_delta, arm):
        """Compute arm joint positions reaching `pose_delta` via a mocap step.

        Runs one mocap-driven step, records the resulting joint positions for
        the requested arm ('l' or 'r'), then restores the previous sim state.
        """
        prev_s = copy.deepcopy(self.sim.get_state())
        mocap_a = np.zeros((self.sim.model.nmocap, 7))
        if arm == 'l' or (arm == 'r' and not self.has_two_arms):
            mocap_a[0] = pose_delta
        elif arm == 'r':
            mocap_a[1] = pose_delta
        else:
            raise NotImplementedError
        self.mocap_control(mocap_a)
        target_qpos = self.sim.data.qpos.copy()
        self.sim.set_state(prev_s)
        arm_target_qpos = target_qpos[getattr(self, f'_arm_{arm}_joint_idx')]
        return arm_target_qpos

    def is_pressing_button(self):
        """Return True if any gripper body is in contact with the button geom."""
        if not self.has_button:
            return False
        sim = self.sim
        for i in range(sim.data.ncon):
            contact = sim.data.contact[i]
            body_name_1 = sim.model.body_id2name(sim.model.geom_bodyid[contact.geom1])
            body_name_2 = sim.model.body_id2name(sim.model.geom_bodyid[contact.geom2])
            geom_name_1 = sim.model.geom_id2name(contact.geom1)
            geom_name_2 = sim.model.geom_id2name(contact.geom2)
            if 'gripper' in body_name_1 and 'button_geom' == geom_name_2 or \
                    'gripper' in body_name_2 and 'button_geom' == geom_name_1:
                return True
        return False

    def get_table_surface_pose(self):
        """Return the table-top pose as [x, y, z, qw, qx, qy, qz] (z at the surface)."""
        pose = np.r_[
            self.sim.data.get_body_xpos('table'),
            self.sim.data.get_body_xquat('table'),
        ]
        geom = self.sim.model.geom_name2id('table')
        size = self.sim.model.geom_size[geom].copy()
        # Shift from the table body center up to its top surface.
        pose[2] += size[2]
        return pose

    def sync_object_init_pos(self, pos: np.ndarray, wrt_table=False, now=False):
        """Set the object's initial XY position, optionally relative to the table.

        With now=True the object is moved immediately; otherwise the position
        is stored and applied at the next reset.
        """
        assert pos.size == 2
        if wrt_table:
            pose = tf.apply_tf(
                np.r_[pos, 0., 1., 0., 0., 0.],
                self.get_table_surface_pose()
            )
            self._object_xy_pos_to_sync = pose[:2]
        else:
            self._object_xy_pos_to_sync = pos.copy()
        if now:
            object_qpos = self.sim.data.get_joint_qpos('object0:joint').copy()
            object_qpos[:2] = self._object_xy_pos_to_sync
            self.sim.data.set_joint_qpos('object0:joint', object_qpos)
            self.sim.forward()

    def get_object_contact_points(self, other_body='gripper'):
        """Return contact points between `other_body` and the object.

        Each entry contains the two body names, the contact position in the
        object's frame, and the 6D contact force.
        """
        if not self.has_object:
            raise NotImplementedError("Cannot get object contact points in an environment without objects!")
        sim = self.sim
        object_name = 'object0'
        object_pos = self.sim.data.get_body_xpos(object_name)
        object_rot = self.sim.data.get_body_xmat(object_name)
        contact_points = []
        # Partially from: https://gist.github.com/machinaut/209c44e8c55245c0d0f0094693053158
        for i in range(sim.data.ncon):
            # Note that the contact array has more than `ncon` entries,
            # so be careful to only read the valid entries.
            contact = sim.data.contact[i]
            body_name_1 = sim.model.body_id2name(sim.model.geom_bodyid[contact.geom1])
            body_name_2 = sim.model.body_id2name(sim.model.geom_bodyid[contact.geom2])
            if other_body in body_name_1 and body_name_2 == object_name or \
                    other_body in body_name_2 and body_name_1 == object_name:
                c_force = np.zeros(6, dtype=np.float64)
                mujoco_py.functions.mj_contactForce(sim.model, sim.data, i, c_force)
                # Compute contact point position wrt the object
                rel_contact_pos = object_rot.T @ (contact.pos - object_pos)
                contact_points.append(dict(
                    body1=body_name_1,
                    body2=body_name_2,
                    relative_pos=rel_contact_pos,
                    force=c_force
                ))
        return contact_points

    def _reset_button(self):
        # Restore the button to its initial position and clear the pressed flag.
        if self.has_button:
            self._button_pressed = False
            self.sim.model.body_pos[self.sim.model.body_name2id("button"), :] = (0.0, 0.0, 0.02)

    # GoalEnv methods
    # ----------------------------

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: dict):
        """Sparse: -k..0 depending on per-arm success; dense: negative distance."""
        assert achieved_goal.shape == desired_goal.shape
        if self.reward_type == 'sparse':
            if self.task == YumiTask.PICK_AND_PLACE_BAR:
                success = self._is_success(achieved_goal, desired_goal)
                return success - 1
            elif self.task == YumiTask.REACH:
                # One success term per active arm; goal layout is [left(3), right(3)].
                success_l = float(self.has_left_arm) * self._is_success(achieved_goal[..., :3], desired_goal[..., :3])
                success_r = float(self.has_right_arm) * self._is_success(achieved_goal[..., 3:], desired_goal[..., 3:])
                success = success_l + success_r
                if self.has_two_arms:
                    return success - 2
                else:
                    return success - 1
            else:
                raise NotImplementedError
        else:
            d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)
            return -d

    # RobotEnv methods
    # ----------------------------

    def _reset_sim(self):
        """Randomize arm/object state; return False to trigger a re-sample."""
        qpos = self.init_qpos.copy()
        qvel = self.init_qvel.copy()
        if self._arm_l_joint_idx is not None:
            pos_low_l = np.r_[0.8, -0.3, -0.4, -0.4, -0.3, -0.3, -0.3]
            pos_high_l = np.r_[1.4, 0.6, 0.4, 0.4, 0.3, 0.3, 0.3]
            qpos[self._arm_l_joint_idx] = self.np_random.uniform(pos_low_l, pos_high_l)
            qvel[self._arm_l_joint_idx] = 0.0
            if self._gripper_l_joint_idx is not None:
                qpos[self._gripper_l_joint_idx] = 0.0
                qvel[self._gripper_l_joint_idx] = 0.0
        if self._arm_r_joint_idx is not None:
            pos_low_r = np.r_[-1.4, -0.3, -0.4, -0.4, -0.3, -0.3, -0.3]
            pos_high_r = np.r_[-0.8, 0.6, 0.4, 0.4, 0.3, 0.3, 0.3]
            qpos[self._arm_r_joint_idx] = self.np_random.uniform(pos_low_r, pos_high_r)
            qvel[self._arm_r_joint_idx] = 0.0
            if self._gripper_r_joint_idx is not None:
                qpos[self._gripper_r_joint_idx] = 0.0
                qvel[self._gripper_r_joint_idx] = 0.0
        self.sim.data.ctrl[:] = 0.0
        self._set_sim_state(qpos, qvel)
        if self.has_left_arm:
            # make sure the left gripper is above the table
            gripper_z = self.sim.data.get_site_xpos('gripper_l_center')[2]
            if gripper_z < 0.043:
                return False
        if self.has_right_arm:
            # make sure the right gripper is above the table
            gripper_z = self.sim.data.get_site_xpos('gripper_r_center')[2]
            if gripper_z < 0.043:
                return False
        if self.has_object:
            self._object_xy_pos_to_sync = self.np_random.uniform(*self._obj_init_bounds)
            object_qpos = self.sim.data.get_joint_qpos('object0:joint').copy()
            if self.has_rotating_platform:
                object_qpos[2] += 0.020
                object_qpos[:2] = self.sim.data.get_site_xpos('rotating_platform:far_end')[:2]
            elif self.has_button:
                object_qpos[:2] = 0.375, -0.476
            elif self.randomize_initial_object_pos:
                # Randomize initial position of object.
                object_qpos[:2] = self._object_xy_pos_to_sync.copy()
            self.sim.data.set_joint_qpos('object0:joint', object_qpos)
        self._reset_button()
        return True

    def _did_press_button(self):
        # Handle the (one-shot) button press: reveal the object and hide the button.
        if self._button_pressed:
            return
        self._button_pressed = True
        # reset object position
        self.sync_object_init_pos(self._object_xy_pos_to_sync, now=True)
        # hide button away
        self.sim.model.body_pos[self.sim.model.body_name2id("button"), :] = (2., 2., 2.)
        self.sim.forward()

    def _get_obs(self):
        """Build the goal-env observation dict for the current task/arm setup."""
        # Empty placeholders: unused components concatenate to nothing.
        arm_l_qpos = np.zeros(0)
        arm_l_qvel = np.zeros(0)
        gripper_l_qpos = np.zeros(0)
        gripper_l_pos = np.zeros(0)
        gripper_l_vel = np.zeros(0)
        gripper_l_to_obj = np.zeros(0)
        arm_r_qpos = np.zeros(0)
        arm_r_qvel = np.zeros(0)
        gripper_r_qpos = np.zeros(0)
        gripper_r_pos = np.zeros(0)
        gripper_r_vel = np.zeros(0)
        gripper_r_to_obj = np.zeros(0)
        dt = self.sim.nsubsteps * self.sim.model.opt.timestep
        if self.is_pressing_button():
            self._did_press_button()
        if self.has_left_arm:
            arm_l_qpos = self.sim.data.qpos[self._arm_l_joint_idx]
            arm_l_qvel = self.sim.data.qvel[self._arm_l_joint_idx]
            arm_l_qvel = np.clip(arm_l_qvel, -10, 10)
            gripper_l_pos = self.sim.data.get_site_xpos('gripper_l_center').copy()
            gripper_l_vel = self.sim.data.get_site_xvelp('gripper_l_center') * dt
            if self._gripper_l_joint_idx is not None:
                gripper_l_qpos = self.sim.data.qpos[self._gripper_l_joint_idx]
        if self.has_right_arm:
            arm_r_qpos = self.sim.data.qpos[self._arm_r_joint_idx]
            arm_r_qvel = self.sim.data.qvel[self._arm_r_joint_idx]
            arm_r_qvel = np.clip(arm_r_qvel, -10, 10)
            gripper_r_pos = self.sim.data.get_site_xpos('gripper_r_center').copy()
            gripper_r_vel = self.sim.data.get_site_xvelp('gripper_r_center') * dt
            if self._gripper_r_joint_idx is not None:
                gripper_r_qpos = self.sim.data.qpos[self._gripper_r_joint_idx]
        object_pose = np.zeros(0)
        object_velp = np.zeros(0)
        object_velr = np.zeros(0)
        if self.task == YumiTask.PICK_AND_PLACE_BAR:
            # Achieved goal is object position and quaternion
            object_pose = np.zeros(7)
            object_pose[:3] = self.sim.data.get_body_xpos('object0')
            if not self.ignore_target_rotation:
                object_pose[3:] = self.sim.data.get_body_xquat('object0')
            achieved_goal = object_pose.copy()
            if self.has_left_arm:
                gripper_l_to_obj = self.sim.data.get_site_xpos('object0:left') - gripper_l_pos
            if self.has_right_arm:
                gripper_r_to_obj = self.sim.data.get_site_xpos('object0:right') - gripper_r_pos
            object_velp = self.sim.data.get_site_xvelp('object0:center') * dt
            object_velr = self.sim.data.get_site_xvelr('object0:center') * dt
        elif self.task == YumiTask.REACH:
            # Achieved goal is gripper(s) position(s)
            achieved_goal = np.zeros(6)
            if self.has_left_arm:
                achieved_goal[:3] = gripper_l_pos.copy()
            if self.has_right_arm:
                achieved_goal[3:] = gripper_r_pos.copy()
        elif self.task == YumiTask.LIFT_ABOVE_TABLE:
            # Achieved goal is distance above table
            object_pose = np.zeros(7)
            object_pose[:3] = self.sim.data.get_body_xpos('object0')
            if not self.ignore_target_rotation:
                object_pose[3:] = self.sim.data.get_body_xquat('object0')
            d_above_table = object_pose[2] - self._object_z_offset
            achieved_goal = np.r_[d_above_table]
        elif self.task == YumiTask.PICK_AND_PLACE_OBJECT:
            object_pose = np.zeros(7)
            object_pose[:3] = self.sim.data.get_body_xpos('object0')
            if not self.ignore_target_rotation:
                object_pose[3:] = self.sim.data.get_body_xquat('object0')
            achieved_goal = object_pose.copy()
            object_velp = self.sim.data.get_site_xvelp('object0:center') * dt
            object_velr = self.sim.data.get_site_xvelr('object0:center') * dt
        else:
            raise NotImplementedError
        obs = np.concatenate([
            arm_l_qpos, arm_l_qvel, gripper_l_qpos,
            arm_r_qpos, arm_r_qvel, gripper_r_qpos,
            gripper_l_pos, gripper_r_pos,
            gripper_l_vel, gripper_r_vel,
            gripper_l_to_obj, gripper_r_to_obj,
            object_pose, object_velp, object_velr,
        ])
        return {
            'observation': obs,
            'achieved_goal': achieved_goal,
            'desired_goal': self.goal.copy(),
        }

    def _set_action(self, a):
        """Clip the agent action and expand it into actuator controls."""
        a = np.clip(a, self.action_space.low, self.action_space.high)
        if not self.block_gripper:
            arm1_a = a[:8]
            arm2_a = a[8:]
            # remap [-1, 1] to [0, gripper_joint_max]
            gripper1_a = self._gripper_joint_max * (arm1_a[7:] + 1.0) / 2.0
            gripper2_a = self._gripper_joint_max * (arm2_a[7:] + 1.0) / 2.0
            a = np.r_[arm1_a, gripper1_a, arm2_a, gripper2_a]
        else:
            arm1_a = a[:7]
            arm2_a = a[7:]
            # Blocked grippers are held open at the maximum joint value.
            g = self._gripper_joint_max
            a = np.r_[arm1_a, g, g]
            if self.has_two_arms:
                a = np.r_[a, arm2_a, g, g]
        _ctrl_set_action(self.sim, a)
        return a

    def _is_success(self, achieved_goal, desired_goal):
        # Success when within distance_threshold of the goal.
        d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)
        return (d < self.distance_threshold).astype(np.float32)

    def _sample_goal(self):
        """Sample a new goal appropriate for the current task."""
        if self.task == YumiTask.PICK_AND_PLACE_BAR or self.task == YumiTask.PICK_AND_PLACE_OBJECT:
            # Goal is object target position and quaternion
            new_goal = np.zeros(7)
            new_goal[:3] = self.np_random.uniform(*self._obj_target_bounds)
            # TODO: Randomize rotation
            if self.object_on_table:
                new_goal[2] = 0.025
        elif self.task == YumiTask.REACH:
            # Goal is gripper(s) target position(s)
            new_goal = np.zeros(6)
            old_state = copy.deepcopy(self.sim.get_state())
            # Rejection-sample reachable positions via forward kinematics.
            if self.has_left_arm:
                while True:
                    left_arm_q = self._sample_safe_qpos(self._arm_l_joint_idx)
                    grp_l_pos = self._fk_position(left_arm_q=left_arm_q, restore_state=False)
                    if _check_range(grp_l_pos, *self._target_bounds_l):
                        new_goal[:3] = grp_l_pos
                        break
            if self.has_right_arm:
                while True:
                    right_arm_q = self._sample_safe_qpos(self._arm_r_joint_idx)
                    grp_r_pos = self._fk_position(right_arm_q=right_arm_q, restore_state=False)
                    if _check_range(grp_r_pos, *self._target_bounds_r):
                        new_goal[3:] = grp_r_pos
                        break
            self.sim.set_state(old_state)
            self.sim.forward()
        elif self.task == YumiTask.LIFT_ABOVE_TABLE:
            # Object should be 40cm above the table
            # This is not actually feasible, but should work well regardless.
            new_goal = np.r_[0.40]
        else:
            raise NotImplementedError
        return new_goal

    def _env_setup(self, initial_qpos):
        """One-time setup: cache joint indices, initial poses and object offset."""
        if initial_qpos is not None:
            raise NotImplementedError
        reset_mocap_welds(self.sim)
        self.sim.forward()
        self.init_qpos = self.sim.data.qpos.ravel().copy()
        self.init_qvel = self.sim.data.qvel.ravel().copy()
        # Joint name suffix order used by the YuMi model files.
        yumi_arm_joints = [1, 2, 7, 3, 4, 5, 6]
        if self.has_right_arm:
            self._arm_r_joint_idx = [self.sim.model.joint_name2id(f'yumi_joint_{i}_r') for i in yumi_arm_joints]
            self.arm_r_joint_lims = self.sim.model.jnt_range[self._arm_r_joint_idx].copy()
        if self.has_left_arm:
            self._arm_l_joint_idx = [self.sim.model.joint_name2id(f'yumi_joint_{i}_l') for i in yumi_arm_joints]
            self.arm_l_joint_lims = self.sim.model.jnt_range[self._arm_l_joint_idx].copy()
        if not self.block_gripper:
            if self.has_right_arm:
                self._gripper_r_joint_idx = [self.sim.model.joint_name2id('gripper_r_joint'),
                                             self.sim.model.joint_name2id('gripper_r_joint_m')]
            if self.has_left_arm:
                self._gripper_l_joint_idx = [self.sim.model.joint_name2id('gripper_l_joint'),
                                             self.sim.model.joint_name2id('gripper_l_joint_m')]
        # Extract information for sampling goals.
        if self.has_left_arm:
            self._initial_l_gripper_pos = self.sim.data.get_site_xpos('gripper_l_center').copy()
        if self.has_right_arm:
            self._initial_r_gripper_pos = self.sim.data.get_site_xpos('gripper_r_center').copy()
        if self.has_object:
            # Let the object settle, then record its resting height.
            for _ in range(10):
                self.sim.step()
            self._object_z_offset = self.sim.data.get_body_xpos('object0')[2]
        self._reset_sim()

    def _viewer_setup(self):
        # Fixed camera for rendering.
        self.viewer.cam.distance = 1.7
        self.viewer.cam.elevation = -20
        self.viewer.cam.azimuth = 180

    def _step_callback(self):
        # Visualize target.
        if self.task == YumiTask.PICK_AND_PLACE_BAR or self.task == YumiTask.PICK_AND_PLACE_OBJECT:
            bodies_offset = (self.sim.data.body_xpos - self.sim.model.body_pos).copy()
            body_id = self.sim.model.body_name2id('target0')
            self.sim.model.body_pos[body_id, :] = self.goal[:3] - bodies_offset[body_id]
        elif self.task == YumiTask.REACH:
            sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
            if self.has_left_arm:
                site_id = self.sim.model.site_name2id('target_l')
                self.sim.model.site_pos[site_id] = self.goal[:3] - sites_offset[site_id]
            if self.has_right_arm:
                site_id = self.sim.model.site_name2id('target_r')
                self.sim.model.site_pos[site_id] = self.goal[3:] - sites_offset[site_id]
        self.sim.forward()

    # Utilities
    # ----------------------------

    @staticmethod
    def get_urdf_model():
        """Load and return the YuMi URDF model (for external kinematics tools)."""
        from urdf_parser_py.urdf import URDF
        root_dir = os.path.dirname(__file__)
        model = URDF.from_xml_file(os.path.join(root_dir, 'assets/misc/yumi.urdf'))
        return model

    @property
    def has_right_arm(self):
        return self.arm == 'right' or self.arm == 'both'

    @property
    def has_left_arm(self):
        return self.arm == 'left' or self.arm == 'both'

    @property
    def has_two_arms(self):
        return self.arm == 'both'

    @property
    def _gripper_base(self):
        # Body name(s) of the active gripper base(s); a tuple for two arms.
        r_base = 'gripper_r_base'
        l_base = 'gripper_l_base'
        if self.arm == 'both':
            return l_base, r_base
        elif self.arm == 'right':
            return r_base
        else:
            return l_base

    def _set_sim_state(self, qpos, qvel):
        """Overwrite the simulation state with the given positions/velocities."""
        assert qpos.shape == (self.sim.model.nq,) and qvel.shape == (self.sim.model.nv,)
        old_state = self.sim.get_state()
        new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,
                                         old_state.act, old_state.udd_state)
        self.sim.set_state(new_state)
        self.sim.forward()

    def _sample_safe_qpos(self, arm_joint_idx):
        # Sample joint positions away from the limits by a pi/6 margin.
        margin = np.pi/6
        jnt_range = self.sim.model.jnt_range[arm_joint_idx].copy()
        jnt_range[:, 0] += margin
        jnt_range[:, 1] -= margin
        return self.np_random.uniform(*jnt_range.T)

    def _fk_position(self, left_arm_q=None, right_arm_q=None, restore_state=True):
        """Forward kinematics: gripper center position for the given arm joints.

        Exactly one of `left_arm_q` / `right_arm_q` must be given. With
        restore_state=True the previous simulation state is restored afterwards.
        """
        grp_pos, old_state = None, None
        if restore_state:
            old_state = copy.deepcopy(self.sim.get_state())
        if left_arm_q is not None:
            assert right_arm_q is None
            idx = self.sim.model.jnt_qposadr[self._arm_l_joint_idx]
            self.sim.data.qpos[idx] = left_arm_q
            self.sim.forward()
            grp_pos = self.sim.data.get_site_xpos('gripper_l_center').copy()
        if right_arm_q is not None:
            assert left_arm_q is None
            idx = self.sim.model.jnt_qposadr[self._arm_r_joint_idx]
            self.sim.data.qpos[idx] = right_arm_q
            self.sim.forward()
            grp_pos = self.sim.data.get_site_xpos('gripper_r_center').copy()
        if restore_state:
            self.sim.set_state(old_state)
            self.sim.forward()
        return grp_pos
class YumiReachEnv(YumiEnv, EzPickle):
    """Reaching task: move the active gripper(s) to target position(s)."""

    def __init__(self, **kwargs):
        # Caller-supplied kwargs take precedence over these defaults.
        kwargs.setdefault('block_gripper', True)
        kwargs.setdefault('reward_type', 'sparse')
        kwargs.setdefault('distance_threshold', 0.05)
        super().__init__(task=YumiTask.REACH, **kwargs)
        EzPickle.__init__(self)
class YumiReachRightArmEnv(YumiReachEnv):
    """Reach variant using only the right arm."""
    def __init__(self, **kwargs):
        super().__init__(arm='right', **kwargs)
class YumiReachLeftArmEnv(YumiReachEnv):
    """Reach variant using only the left arm."""
    def __init__(self, **kwargs):
        super().__init__(arm='left', **kwargs)
class YumiReachTwoArmsEnv(YumiReachEnv):
    """Reach variant using both arms simultaneously."""
    def __init__(self, **kwargs):
        super().__init__(arm='both', **kwargs)
class YumiBarEnv(YumiEnv, EzPickle):
    """Two-handed bar pick-and-place task with free grippers."""

    def __init__(self, **kwargs):
        # Caller-supplied kwargs take precedence over these defaults.
        kwargs.setdefault('arm', 'both')
        kwargs.setdefault('block_gripper', False)
        kwargs.setdefault('reward_type', 'sparse')
        kwargs.setdefault('distance_threshold', 0.05)
        super().__init__(task=YumiTask.PICK_AND_PLACE_BAR, **kwargs)
        EzPickle.__init__(self)
class YumiLiftEnv(YumiEnv, EzPickle):
    """Lift-above-table task: both arms, dense reward, free grippers by default."""

    def __init__(self, **kwargs):
        # Caller-supplied kwargs take precedence over this default.
        kwargs.setdefault('block_gripper', False)
        super().__init__(task=YumiTask.LIFT_ABOVE_TABLE, arm='both', reward_type='dense', **kwargs)
        EzPickle.__init__(self)
| [
"gym.envs.robotics.utils.reset_mocap_welds",
"gym.utils.EzPickle.__init__",
"os.path.dirname",
"numpy.zeros",
"numpy.all",
"numpy.clip",
"numpy.linalg.norm",
"gym.envs.robotics.utils.reset_mocap2body_xpos",
"mujoco_py.MjSimState",
"os.path.join",
"mujoco_py.functions.mj_contactForce",
"numpy.c... | [((357, 392), 'numpy.all', 'np.all', (['((a_min <= a) & (a <= a_max))'], {}), '((a_min <= a) & (a <= a_max))\n', (363, 392), True, 'import numpy as np\n'), ((418, 451), 'numpy.all', 'np.all', (['((a_min < a) & (a < a_max))'], {}), '((a_min < a) & (a < a_max))\n', (424, 451), True, 'import numpy as np\n'), ((1057, 1083), 'gym.envs.robotics.utils.reset_mocap2body_xpos', 'reset_mocap2body_xpos', (['sim'], {}), '(sim)\n', (1078, 1083), False, 'from gym.envs.robotics.utils import reset_mocap2body_xpos, reset_mocap_welds\n'), ((9875, 9906), 'gym.envs.robotics.utils.reset_mocap2body_xpos', 'reset_mocap2body_xpos', (['self.sim'], {}), '(self.sim)\n', (9896, 9906), False, 'from gym.envs.robotics.utils import reset_mocap2body_xpos, reset_mocap_welds\n'), ((10168, 10204), 'numpy.zeros', 'np.zeros', (['(self.sim.model.nmocap, 7)'], {}), '((self.sim.model.nmocap, 7))\n', (10176, 10204), True, 'import numpy as np\n'), ((17973, 17984), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (17981, 17984), True, 'import numpy as np\n'), ((18006, 18017), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18014, 18017), True, 'import numpy as np\n'), ((18043, 18054), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18051, 18054), True, 'import numpy as np\n'), ((18079, 18090), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18087, 18090), True, 'import numpy as np\n'), ((18115, 18126), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18123, 18126), True, 'import numpy as np\n'), ((18154, 18165), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18162, 18165), True, 'import numpy as np\n'), ((18188, 18199), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18196, 18199), True, 'import numpy as np\n'), ((18221, 18232), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18229, 18232), True, 'import numpy as np\n'), ((18258, 18269), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18266, 18269), True, 'import numpy as np\n'), ((18294, 18305), 'numpy.zeros', 
'np.zeros', (['(0)'], {}), '(0)\n', (18302, 18305), True, 'import numpy as np\n'), ((18330, 18341), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18338, 18341), True, 'import numpy as np\n'), ((18369, 18380), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (18377, 18380), True, 'import numpy as np\n'), ((19564, 19575), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (19572, 19575), True, 'import numpy as np\n'), ((19598, 19609), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (19606, 19609), True, 'import numpy as np\n'), ((19632, 19643), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (19640, 19643), True, 'import numpy as np\n'), ((21782, 22025), 'numpy.concatenate', 'np.concatenate', (['[arm_l_qpos, arm_l_qvel, gripper_l_qpos, arm_r_qpos, arm_r_qvel,\n gripper_r_qpos, gripper_l_pos, gripper_r_pos, gripper_l_vel,\n gripper_r_vel, gripper_l_to_obj, gripper_r_to_obj, object_pose,\n object_velp, object_velr]'], {}), '([arm_l_qpos, arm_l_qvel, gripper_l_qpos, arm_r_qpos,\n arm_r_qvel, gripper_r_qpos, gripper_l_pos, gripper_r_pos, gripper_l_vel,\n gripper_r_vel, gripper_l_to_obj, gripper_r_to_obj, object_pose,\n object_velp, object_velr])\n', (21796, 22025), True, 'import numpy as np\n'), ((22290, 22347), 'numpy.clip', 'np.clip', (['a', 'self.action_space.low', 'self.action_space.high'], {}), '(a, self.action_space.low, self.action_space.high)\n', (22297, 22347), True, 'import numpy as np\n'), ((23052, 23105), 'numpy.linalg.norm', 'np.linalg.norm', (['(achieved_goal - desired_goal)'], {'axis': '(-1)'}), '(achieved_goal - desired_goal, axis=-1)\n', (23066, 23105), True, 'import numpy as np\n'), ((25038, 25065), 'gym.envs.robotics.utils.reset_mocap_welds', 'reset_mocap_welds', (['self.sim'], {}), '(self.sim)\n', (25055, 25065), False, 'from gym.envs.robotics.utils import reset_mocap2body_xpos, reset_mocap_welds\n'), ((27957, 27982), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (27972, 27982), False, 'import os\n'), ((28835, 
28924), 'mujoco_py.MjSimState', 'mujoco_py.MjSimState', (['old_state.time', 'qpos', 'qvel', 'old_state.act', 'old_state.udd_state'], {}), '(old_state.time, qpos, qvel, old_state.act, old_state.\n udd_state)\n', (28855, 28924), False, 'import mujoco_py\n'), ((30500, 30523), 'gym.utils.EzPickle.__init__', 'EzPickle.__init__', (['self'], {}), '(self)\n', (30517, 30523), False, 'from gym.utils import EzPickle, transformations as tf\n'), ((31204, 31227), 'gym.utils.EzPickle.__init__', 'EzPickle.__init__', (['self'], {}), '(self)\n', (31221, 31227), False, 'from gym.utils import EzPickle, transformations as tf\n'), ((31507, 31530), 'gym.utils.EzPickle.__init__', 'EzPickle.__init__', (['self'], {}), '(self)\n', (31524, 31530), False, 'from gym.utils import EzPickle, transformations as tf\n'), ((9386, 9411), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9401, 9411), False, 'import os\n'), ((15079, 15132), 'numpy.linalg.norm', 'np.linalg.norm', (['(achieved_goal - desired_goal)'], {'axis': '(-1)'}), '(achieved_goal - desired_goal, axis=-1)\n', (15093, 15132), True, 'import numpy as np\n'), ((18710, 18738), 'numpy.clip', 'np.clip', (['arm_l_qvel', '(-10)', '(10)'], {}), '(arm_l_qvel, -10, 10)\n', (18717, 18738), True, 'import numpy as np\n'), ((19221, 19249), 'numpy.clip', 'np.clip', (['arm_r_qvel', '(-10)', '(10)'], {}), '(arm_r_qvel, -10, 10)\n', (19228, 19249), True, 'import numpy as np\n'), ((19786, 19797), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (19794, 19797), True, 'import numpy as np\n'), ((23382, 23393), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (23390, 23393), True, 'import numpy as np\n'), ((28018, 28065), 'os.path.join', 'os.path.join', (['root_dir', '"""assets/misc/yumi.urdf"""'], {}), "(root_dir, 'assets/misc/yumi.urdf')\n", (28030, 28065), False, 'import os\n'), ((13393, 13422), 'numpy.zeros', 'np.zeros', (['(6)'], {'dtype': 'np.float64'}), '(6, dtype=np.float64)\n', (13401, 13422), True, 'import numpy as 
np\n'), ((13439, 13507), 'mujoco_py.functions.mj_contactForce', 'mujoco_py.functions.mj_contactForce', (['sim.model', 'sim.data', 'i', 'c_force'], {}), '(sim.model, sim.data, i, c_force)\n', (13474, 13507), False, 'import mujoco_py\n'), ((20577, 20588), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (20585, 20588), True, 'import numpy as np\n'), ((23699, 23710), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (23707, 23710), True, 'import numpy as np\n'), ((20904, 20915), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (20912, 20915), True, 'import numpy as np\n'), ((21308, 21319), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (21316, 21319), True, 'import numpy as np\n')] |
import bert_pytorch
import numpy as np
import sys,os
import json
from transformers import AutoTokenizer, AutoModel
import torch
import ijson
import torch.nn as nn
from tqdm import tqdm
import random
from multiprocessing import set_start_method
try:
set_start_method('spawn')
except RuntimeError:
pass
part_list = ['v','vd','vn','n','nz','nt']
part_list_back = ['a','ad','an','d','i','l','s']
class text_embed:
    """Extract BERT embeddings for Chinese or English sentences.

    Pipeline:
        1. Tokenize each sentence and build padded id / segment / mask batches.
        2. Run a pretrained BERT model (on GPU when available).
        3. Persist the resulting embeddings to disk (JSON or .npz).
    """

    # Pretrained model name for each supported language.
    _MODELS = {'Chinese': 'bert-base-chinese', 'English': 'bert-base-uncased'}

    def __init__(self, language=None):
        """Load the tokenizer and BERT model for ``language``.

        :param language: ``'Chinese'`` or ``'English'``
        :raises ValueError: if ``language`` is missing or unsupported
            (the original checked too late and crashed with an
            AttributeError on ``self.bert`` instead)
        """
        if language not in self._MODELS:
            raise ValueError(
                "language must be 'Chinese' or 'English', got %r" % (language,))
        model_name = self._MODELS[language]
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.bert = AutoModel.from_pretrained(model_name).cuda()
        self.bert.eval()  # inference only; disable dropout etc.

    def _encode(self, texts):
        """Tokenize ``texts``, pad to a common length and run BERT.

        Shared implementation behind :meth:`process_ch` and
        :meth:`process_en` (the two originals were near-identical copies).

        :param texts: list of sentence strings
        :return: raw model output (token features, pooled features)
        """
        tokens, segments, input_masks = [], [], []
        for text in texts:
            ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
            tokens.append(ids)
            segments.append([0] * len(ids))
            input_masks.append([1] * len(ids))
        # Right-pad every sequence to the longest sentence in the batch.
        max_len = max(len(single) for single in tokens)
        for j in range(len(tokens)):
            padding = [0] * (max_len - len(tokens[j]))
            tokens[j] += padding
            segments[j] += padding
            input_masks[j] += padding
        tokens_tensor = torch.tensor(tokens)
        segments_tensors = torch.tensor(segments)
        input_masks_tensors = torch.tensor(input_masks)
        if torch.cuda.is_available():
            tokens_tensor = tokens_tensor.cuda()
            segments_tensors = segments_tensors.cuda()
            input_masks_tensors = input_masks_tensors.cuda()
        else:
            print('cuda 有点问题呀')
        with torch.no_grad():
            features = self.bert(tokens_tensor, input_masks_tensors, segments_tensors)
        return features

    def process_ch(self, texts):
        """Return per-token and pooled BERT features for Chinese sentences.

        :param texts: list of sentence strings
        :return: ``[token_features, pooled_features]`` as numpy arrays of
            shape (batch, max_len, hidden) and (batch, hidden)
        """
        features = self._encode(texts)
        return [features[0].cpu().numpy(), features[1].cpu().numpy()]

    def process_en(self, texts):
        """Return pooled (sentence-level) BERT features for English sentences.

        :param texts: list of sentence strings
        :return: numpy array of shape (batch, hidden)
        """
        features = self._encode(texts)
        return features[1].cpu().numpy()

    def save_embeds(self, path_dir, da_js):
        """Serialize ``da_js`` as JSON to ``path_dir + '.txt'``."""
        with open(path_dir + '.txt', 'w') as w:
            w.writelines(json.dumps(da_js, ensure_ascii=False))

    def save_embeds_ndarray(self, path_dir, features):
        """Save a (word_embed, sentence_embed, pos) triple to ``path_dir + '.npz'``."""
        word_embed, setence_embed, ch_pos = features
        np.savez(path_dir + '.npz',
                 word_embed=word_embed, setence_embed=setence_embed,
                 text_pos=ch_pos)
def vatex_process():
    """Extract BERT features for the 5th Chinese caption of every VATEX
    training video and save them, together with POS tags, as one .npz
    file per video under ``train_mean_multi_np/``.
    """
    with open('/home/fengkai/dataset/vatex/vatex_training_v1.0.json', 'r') as r:
        # The annotation file is a single JSON line.
        da_js = json.loads(r.readlines()[0])
    text_processer = text_embed('Chinese')
    path_dir = '/home/fengkai/dataset/vatex/text_embed_info/train_mean_multi_np'
    for video_txt_info in tqdm(da_js):
        videoID = video_txt_info.get('videoID', '')
        ch_caps = video_txt_info.get('chCap', '')
        # Only the 5th caption is used (fixed choice, matching word_pipeline).
        ch_caps = [ch_caps[4]]
        ch_pos = PartOfSpeech(ch_caps)
        features = text_processer.process_ch(ch_caps)
        word_embed = features[0][0]      # per-token embeddings of the caption
        setence_embed = features[1][0]   # pooled sentence embedding
        path_dir_id = os.path.join(path_dir, videoID)
        data = (word_embed, setence_embed, ch_pos)
        text_processer.save_embeds_ndarray(path_dir_id, data)
def msrvtt_process():
    """Extract pooled English BERT features for MSR-VTT captions.

    For each split (val/test) the caption file is parsed into
    ``{videoID: {'videoID', 'Text', 'Embed'}}`` records (20 captions per
    video), the 20 sentences are embedded in one batch, and each record is
    serialized to JSON via ``text_embed.save_embeds``.
    """
    path = {x: '/home/fengkai/dataset/msrvtt/msrvtt10k' + x + '/TextData/' + 'msrvtt10k' + x + '.caption.txt'
            for x in ['val', 'test']}
    path_dir = {x: '/home/fengkai/dataset/msrvtt/msrvtt10k' + x + '/TextData/' for x in ['val', 'test']}
    for key, path_value in path.items():
        with open(path_value, 'r') as r:
            rr = r.readlines()
        info = {}
        data = []
        for line in rr:
            # Caption lines look like "<videoID>#<sentenceID> <caption text>".
            line = line.strip().split(' ', maxsplit=1)
            videoID = line[0].split('#', maxsplit=1)[0]
            data.append([videoID, line[1]])
        data.sort()
        for videoID, text in data:
            if videoID in info:
                info[videoID]['Text'].append(text)
            else:
                info[videoID] = {'videoID': videoID,
                                 'Text': [text],
                                 'Embed': []}
        text_processer = text_embed('English')
        for k, v in tqdm(info.items()):
            if len(v['Text']) != 20:
                print('Wrong')
                assert False
            embed = text_processer.process_en(v["Text"])
            # Store a plain list: json.dumps() cannot serialize a numpy
            # array, so the original ndarray assignment made save_embeds
            # raise a TypeError.
            info[k]['Embed'] = embed.tolist()
            # NOTE(review): path_dir[key] is a directory, so every record is
            # written to the same '<TextData>.txt' file and overwritten --
            # this probably should include the video id; confirm intent.
            text_processer.save_embeds(path_dir[key], v)
def pos_embed(embeddings_index, words, word_parts):
"""
docstring
"""
word_tem,text_part = [],[]
for word, part in zip(words, word_parts):
if part in part_list:
word_tem.append(word)
text_part.append(part)
text_part_outs = []
embed_part_outs = []
for word, part in zip(word_tem,text_part):
if word in embeddings_index:
word_vector = embeddings_index[word]
text_part_outs.append(word+' '+part)
embed_part_outs.append(word_vector)
if len(text_part_outs) < 2:
for word, part in zip(words, word_parts):
if part in part_list_back:
word_tem.append(word)
text_part.append(part)
for word, part in zip(word_tem,text_part):
if word in embeddings_index:
word_vector = embeddings_index[word]
text_part_outs.append(word+' '+part)
embed_part_outs.append(word_vector)
if len(text_part_outs) < 2:
for word, part in zip(words, word_parts):
word_tem.append(word)
text_part.append(part)
for word, part in zip(word_tem,text_part):
if word in embeddings_index:
word_vector = embeddings_index[word]
text_part_outs.append(word+' '+part)
embed_part_outs.append(word_vector)
text_part_outs = np.array(text_part_outs[:2])
embed_part_outs = np.array(embed_part_outs[:2])
return text_part_outs, embed_part_outs
def word2vector(path='/home/fengkai/model/word2vector_chinese') -> dict:
    """Load a word2vec text file into a dict.

    The first line (the "<word_count> <dim>" header) is skipped; every
    following line is "<word> <v1> <v2> ...".

    :param path: word2vec text file (defaults to the original hard-coded path)
    :return: dict mapping word -> np.ndarray(dtype='float32')
    """
    embeddings_index = {}
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(path, 'r', encoding='utf-8') as r:
        r.readline()  # discard the "count dim" header line
        for data in tqdm(r.readlines()):
            data_list = data.strip().split(' ')
            word = data_list[0].strip()
            embeddings_index[word] = np.asarray(data_list[1:], dtype='float32')
    return embeddings_index
def word_pipeline():
    """Build two-word vector/POS files for every VATEX training video.

    Segments the 5th Chinese caption of each video, selects two tagged
    words that have word2vec vectors (see pos_embed) and stores them in
    '<path_dir>/<videoID>.npz'. Videos yielding fewer than two vectors
    are printed and counted.
    """
    annotation_path = '/home/fengkai/dataset/vatex/vatex_training_v1.0.json'
    path_dir = '/home/fengkai/dataset/vatex/text_embed_info/train_wordvector_pos'
    with open(annotation_path, 'r') as fp:
        # The annotation file is a single JSON line.
        annotations = json.loads(fp.readlines()[0])
    embeddings_index = word2vector()
    wrong = 0
    for record in tqdm(annotations):
        video_id = record.get('videoID', '')
        captions = [record.get('chCap', '')[4]]  # only the 5th caption is used
        words, flags = PartOfSpeech(captions)
        texts, vectors = pos_embed(embeddings_index, words, flags)
        if len(texts) != 2:
            # Fewer than two usable word vectors for this caption.
            wrong += 1
            print(video_id)
            print(captions)
            print(texts)
            print(vectors)
        np.savez(os.path.join(path_dir, video_id) + '.npz',
                 text_part_outs=texts,
                 embed_part_outs=vectors)
    print('不满两个词向量的共这么多:')
    print(wrong)
def PartOfSpeech(sentence):
    """Segment Chinese sentences and tag each word's part of speech.

    :param sentence: list of sentence strings
    :return: (words, flags) -- flat, parallel lists over all sentences,
        where flags are jieba POS tags ('v', 'n', 'a', ...)
    """
    # jieba was used here but never imported at module level (NameError at
    # runtime); import it locally so the function is self-contained.
    import jieba.posseg
    word_out, flag_out = [], []
    for sent in sentence:
        for token in jieba.posseg.cut(sent.strip()):
            word_out.append(token.word)
            flag_out.append(token.flag)
    return word_out, flag_out
if __name__ == "__main__":
word_pipeline()
# for en_cap in en_caps:
# en_embed = process_en(en_cap)
# en_embeds.append(en_embed)
# video_txt_info['enEmebd'] = en_embeds
# ## 向量str化
# # for key, ch_embed in enumerate(ch_embeds):
# video_txt_info['chEmbed'] = ch_embeds
# text_processer.save_embeds(video_txt_info) | [
"tqdm.tqdm",
"json.loads",
"numpy.asarray",
"multiprocessing.set_start_method",
"json.dumps",
"transformers.AutoModel.from_pretrained",
"transformers.AutoTokenizer.from_pretrained",
"numpy.array",
"torch.cuda.is_available",
"numpy.savez",
"torch.no_grad",
"os.path.join",
"torch.tensor"
] | [((255, 280), 'multiprocessing.set_start_method', 'set_start_method', (['"""spawn"""'], {}), "('spawn')\n", (271, 280), False, 'from multiprocessing import set_start_method\n'), ((4898, 4909), 'tqdm.tqdm', 'tqdm', (['da_js'], {}), '(da_js)\n', (4902, 4909), False, 'from tqdm import tqdm\n'), ((8515, 8543), 'numpy.array', 'np.array', (['text_part_outs[:2]'], {}), '(text_part_outs[:2])\n', (8523, 8543), True, 'import numpy as np\n'), ((8566, 8595), 'numpy.array', 'np.array', (['embed_part_outs[:2]'], {}), '(embed_part_outs[:2])\n', (8574, 8595), True, 'import numpy as np\n'), ((8951, 8962), 'tqdm.tqdm', 'tqdm', (['lines'], {}), '(lines)\n', (8955, 8962), False, 'from tqdm import tqdm\n'), ((9538, 9549), 'tqdm.tqdm', 'tqdm', (['da_js'], {}), '(da_js)\n', (9542, 9549), False, 'from tqdm import tqdm\n'), ((1869, 1894), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1892, 1894), False, 'import torch\n'), ((3366, 3391), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3389, 3391), False, 'import torch\n'), ((4379, 4480), 'numpy.savez', 'np.savez', (["(path_dir + '.npz')"], {'word_embed': 'word_embed', 'setence_embed': 'setence_embed', 'text_pos': 'ch_pos'}), "(path_dir + '.npz', word_embed=word_embed, setence_embed=\n setence_embed, text_pos=ch_pos)\n", (4387, 4480), True, 'import numpy as np\n'), ((4731, 4747), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (4741, 4747), False, 'import json\n'), ((5324, 5355), 'os.path.join', 'os.path.join', (['path_dir', 'videoID'], {}), '(path_dir, videoID)\n', (5336, 5355), False, 'import sys, os\n'), ((9077, 9119), 'numpy.asarray', 'np.asarray', (['data_list[1:]'], {'dtype': '"""float32"""'}), "(data_list[1:], dtype='float32')\n", (9087, 9119), True, 'import numpy as np\n'), ((9376, 9392), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (9386, 9392), False, 'import json\n'), ((9749, 9780), 'os.path.join', 'os.path.join', (['path_dir', 'videoID'], {}), '(path_dir, 
videoID)\n', (9761, 9780), False, 'import sys, os\n'), ((10056, 10154), 'numpy.savez', 'np.savez', (["(path_dir_id + '.npz')"], {'text_part_outs': 'text_part_outs', 'embed_part_outs': 'embed_part_outs'}), "(path_dir_id + '.npz', text_part_outs=text_part_outs,\n embed_part_outs=embed_part_outs)\n", (10064, 10154), True, 'import numpy as np\n'), ((614, 664), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""bert-base-chinese"""'], {}), "('bert-base-chinese')\n", (643, 664), False, 'from transformers import AutoTokenizer, AutoModel\n'), ((2154, 2174), 'torch.tensor', 'torch.tensor', (['tokens'], {}), '(tokens)\n', (2166, 2174), False, 'import torch\n'), ((2206, 2228), 'torch.tensor', 'torch.tensor', (['segments'], {}), '(segments)\n', (2218, 2228), False, 'import torch\n'), ((2263, 2288), 'torch.tensor', 'torch.tensor', (['input_masks'], {}), '(input_masks)\n', (2275, 2288), False, 'import torch\n'), ((2330, 2345), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2343, 2345), False, 'import torch\n'), ((3651, 3671), 'torch.tensor', 'torch.tensor', (['tokens'], {}), '(tokens)\n', (3663, 3671), False, 'import torch\n'), ((3703, 3725), 'torch.tensor', 'torch.tensor', (['segments'], {}), '(segments)\n', (3715, 3725), False, 'import torch\n'), ((3760, 3785), 'torch.tensor', 'torch.tensor', (['input_masks'], {}), '(input_masks)\n', (3772, 3785), False, 'import torch\n'), ((3827, 3842), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3840, 3842), False, 'import torch\n'), ((4191, 4228), 'json.dumps', 'json.dumps', (['da_js'], {'ensure_ascii': '(False)'}), '(da_js, ensure_ascii=False)\n', (4201, 4228), False, 'import json\n'), ((866, 916), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (895, 916), False, 'from transformers import AutoTokenizer, AutoModel\n'), ((689, 735), 'transformers.AutoModel.from_pretrained', 
'AutoModel.from_pretrained', (['"""bert-base-chinese"""'], {}), "('bert-base-chinese')\n", (714, 735), False, 'from transformers import AutoTokenizer, AutoModel\n'), ((1924, 1944), 'torch.tensor', 'torch.tensor', (['tokens'], {}), '(tokens)\n', (1936, 1944), False, 'import torch\n'), ((1983, 2005), 'torch.tensor', 'torch.tensor', (['segments'], {}), '(segments)\n', (1995, 2005), False, 'import torch\n'), ((2047, 2072), 'torch.tensor', 'torch.tensor', (['input_masks'], {}), '(input_masks)\n', (2059, 2072), False, 'import torch\n'), ((3421, 3441), 'torch.tensor', 'torch.tensor', (['tokens'], {}), '(tokens)\n', (3433, 3441), False, 'import torch\n'), ((3480, 3502), 'torch.tensor', 'torch.tensor', (['segments'], {}), '(segments)\n', (3492, 3502), False, 'import torch\n'), ((3544, 3569), 'torch.tensor', 'torch.tensor', (['input_masks'], {}), '(input_masks)\n', (3556, 3569), False, 'import torch\n'), ((941, 987), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (966, 987), False, 'from transformers import AutoTokenizer, AutoModel\n')] |
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2015, <NAME> <<EMAIL>>
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA_MCCNN
#
# https://github.com/CNES/Pandora_MCCNN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains all functions to load and preprocess dataset
"""
import math
from torch.utils import data
import numpy as np
import h5py
import cv2
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
class DataFusionContestGenerator(data.Dataset):
    """
    Torch dataset producing (left, right-positive, right-negative) patch
    triplets for training a patch-matching network on data fusion contest
    stereo pairs.

    Samples come from one hdf5 file (rows of [image_id, row, col,
    disparity]) and the matching image pairs from a second hdf5 file
    (axis 0: 0 = left image, 1 = right image).
    """

    def __init__(self, sample_hdf, image_hdf, cfg):
        """
        Initialization

        :param sample_hdf: path to the hdf5 file of training samples; each
            dataset holds rows of [image_id, row, col, disparity]
        :param image_hdf: path to the hdf5 file of image pairs, keyed by the
            same dataset names as sample_hdf
        :param cfg: configuration dict; must provide "dataset_neg_low",
            "dataset_neg_high", "dataset_pos", "vertical_disp",
            "data_augmentation" and the "augmentation_param" sub-dict
        """
        # Patches are patch_size x patch_size pixels (11 -> radius 5).
        self.patch_size = 11
        self.data = None
        self.image = []
        # Correspondence between the index of the image in self.image and
        # the image number stored in the hdf5 file (first sample column).
        self.id_image = []

        sample_file = h5py.File(sample_hdf, "r")
        image_file = h5py.File(image_hdf, "r")

        # Concatenate every per-image sample table into one array and keep
        # the matching image data in memory.
        for dst in sample_file.keys():
            if self.data is None:
                self.data = sample_file[dst][:]
            else:
                self.data = np.concatenate((self.data, sample_file[dst][:]), axis=0)
            self.image.append(image_file[dst][:])
            self.id_image.append(int(sample_file[dst][0, 0]))

        # Sampling intervals (in pixels) for the positive / negative
        # right-image column around the true match.
        self.neg_low = float(cfg["dataset_neg_low"])
        self.neg_high = float(cfg["dataset_neg_high"])
        self.pos = float(cfg["dataset_pos"])
        # Maximum vertical jitter applied to the right patch row.
        self.disp_vert = float(cfg["vertical_disp"])

        # Data augmentation switch and its parameters: base values apply to
        # the left patch, the d_* deltas perturb the right patches further.
        self.transformation = cfg["data_augmentation"]
        self.scale = float(cfg["augmentation_param"]["scale"])
        self.hscale = float(cfg["augmentation_param"]["hscale"])
        self.hshear = float(cfg["augmentation_param"]["hshear"])
        self.trans = float(cfg["augmentation_param"]["trans"])
        self.rotate = float(cfg["augmentation_param"]["rotate"])
        self.brightness = float(cfg["augmentation_param"]["brightness"])
        self.contrast = float(cfg["augmentation_param"]["contrast"])
        self.d_hscale = float(cfg["augmentation_param"]["d_hscale"])
        self.d_hshear = float(cfg["augmentation_param"]["d_hshear"])
        self.d_vtrans = float(cfg["augmentation_param"]["d_vtrans"])
        self.d_rotate = float(cfg["augmentation_param"]["d_rotate"])
        self.d_brightness = float(cfg["augmentation_param"]["d_brightness"])
        self.d_contrast = float(cfg["augmentation_param"]["d_contrast"])

    def __getitem__(self, index):
        """
        Generates one sample : the left patch, the right positive patch, the right negative patch

        :param index: row index into the concatenated sample table
        :return: left patch, right positive patch, right negative patch
        :rtype: np.array(3, patch_size, patch_size)
        """
        # Sample row: [image_id, row, col, disparity].
        row = int(self.data[index, 1])
        col = int(self.data[index, 2])
        disp = int(self.data[index, 3])
        id_data = self.id_image.index(int(self.data[index, 0]))

        # Patch radius
        radius = int(self.patch_size / 2)

        width = self.image[id_data].shape[2] - radius

        # Column of the positive example: true match (col - disp) plus a
        # small jitter in [-pos, pos], re-drawn until it lies in the image.
        x_pos = -1
        while x_pos < 0 or x_pos >= width:
            x_pos = int((col - disp) + np.random.uniform(-self.pos, self.pos))

        # Column of the negative example: offset drawn from
        # [neg_low, neg_high] so it misses the true match.
        x_neg = -1
        while x_neg < 0 or x_neg >= width:
            x_neg = int((col - disp) + np.random.uniform(self.neg_low, self.neg_high))

        # Row of the right patches: vertical jitter of at most disp_vert.
        height = self.image[id_data].shape[1] - radius
        y_disp = -1
        while y_disp < 0 or y_disp >= height:
            y_disp = int(row + np.random.uniform(-self.disp_vert, self.disp_vert))

        if self.transformation:
            # Calculates random data augmentation shared by the pair.
            rand_scale = np.random.uniform(self.scale, 1)
            scale = [rand_scale * np.random.uniform(self.hscale, 1), rand_scale]
            hshear = np.random.uniform(-self.hshear, self.hshear)
            trans = [np.random.uniform(-self.trans, self.trans), np.random.uniform(-self.trans, self.trans)]
            phi = np.random.uniform(-self.rotate * math.pi / 180.0, self.rotate * math.pi / 180.0)
            brightness = np.random.uniform(-self.brightness, self.brightness)
            contrast = np.random.uniform(1.0 / self.contrast, self.contrast)
            left = self.data_augmentation(
                self.image[id_data][0, :, :], row, col, scale, phi, trans, hshear, brightness, contrast
            )

            # Perturb the left-patch parameters by the d_* deltas so the two
            # right patches see a slightly different transform.
            scale__ = [scale[0] * np.random.uniform(self.d_hscale, 1), scale[1]]
            hshear_ = hshear + np.random.uniform(-self.d_hshear, self.d_hshear)
            trans_ = [trans[0], trans[1] + np.random.uniform(-self.d_vtrans, self.d_vtrans)]
            phi_ = phi + np.random.uniform(-self.d_rotate * math.pi / 180.0, self.d_rotate * math.pi / 180.0)
            brightness_ = brightness + np.random.uniform(-self.d_brightness, self.d_brightness)
            contrast_ = contrast * np.random.uniform(1 / self.d_contrast, self.d_contrast)

            right_pos = self.data_augmentation(
                self.image[id_data][1, :, :], y_disp, x_pos, scale__, phi_, trans_, hshear_, brightness_, contrast_
            )
            right_neg = self.data_augmentation(
                self.image[id_data][1, :, :], y_disp, x_neg, scale__, phi_, trans_, hshear_, brightness_, contrast_
            )

        else:
            # Make the left patch
            left = self.image[id_data][0, row - radius : row + radius + 1, col - radius : col + radius + 1]

            # Make the right positive patch
            right_pos = self.image[id_data][
                1, y_disp - radius : y_disp + radius + 1, x_pos - radius : radius + x_pos + 1
            ]

            # Make the right negative patch
            right_neg = self.image[id_data][
                1, y_disp - radius : y_disp + radius + 1, x_neg - radius : radius + x_neg + 1
            ]

        return np.stack((left, right_pos, right_neg), axis=0)

    def __len__(self):
        """
        Return the total number of samples
        """
        return self.data.shape[0]

    def data_augmentation(self, src, row, col, scale, phi, trans, hshear, brightness, contrast):
        """
        Return augmented patch : apply affine transformations

        :param src: source image
        :param row: row center of the patch
        :param col: col center of the patch
        :param scale: scale factor
        :param phi: rotation factor
        :param trans: translation factor
        :param hshear: shear factor in horizontal direction
        :param brightness: brightness
        :param contrast: contrast
        :return: the augmented patch
        :rtype: np.array(self.patch_size, self.patch_size)
        """
        # Compose the affine transform in homogeneous coordinates, right to
        # left: center on (col, row), translate, scale, rotate, shear, then
        # re-center on the middle of the output patch.
        homo_matrix = np.array([[1, 0, -col], [0, 1, -row], [0, 0, 1]])

        translation_matrix = np.array([[1, 0, trans[0]], [0, 1, trans[1]], [0, 0, 1]])
        homo_matrix = np.matmul(translation_matrix, homo_matrix)

        scale_matrix = np.array([[scale[0], 0, 0], [0, scale[1], 0], [0, 0, 1]])
        homo_matrix = np.matmul(scale_matrix, homo_matrix)

        cos_phi = math.cos(phi)
        sin_phi = math.sin(phi)
        rotate_matrix = np.array([[cos_phi, sin_phi, 0], [-sin_phi, cos_phi, 0], [0, 0, 1]])
        homo_matrix = np.matmul(rotate_matrix, homo_matrix)

        shear_matrix = np.array([[1, hshear, 0], [0, 1, 0], [0, 0, 1]])
        homo_matrix = np.matmul(shear_matrix, homo_matrix)

        # Final 2x3 matrix (the form cv2.warpAffine expects).
        translation_matrix = np.array([[1, 0, (self.patch_size - 1) / 2], [0, 1, (self.patch_size - 1) / 2]])
        homo_matrix = np.matmul(translation_matrix, homo_matrix)

        dst = cv2.warpAffine(src, homo_matrix, (self.patch_size, self.patch_size))

        # NOTE(review): the in-place float ops below assume src is a float
        # image (they would raise on uint8 data) -- confirm upstream dtype.
        dst *= contrast
        dst += brightness
        return dst
| [
"numpy.stack",
"numpy.random.uniform",
"h5py.File",
"math.sin",
"cv2.warpAffine",
"numpy.array",
"math.cos",
"numpy.matmul",
"numpy.concatenate"
] | [((1465, 1491), 'h5py.File', 'h5py.File', (['sample_hdf', '"""r"""'], {}), "(sample_hdf, 'r')\n", (1474, 1491), False, 'import h5py\n'), ((1513, 1538), 'h5py.File', 'h5py.File', (['image_hdf', '"""r"""'], {}), "(image_hdf, 'r')\n", (1522, 1538), False, 'import h5py\n'), ((6502, 6548), 'numpy.stack', 'np.stack', (['(left, right_pos, right_neg)'], {'axis': '(0)'}), '((left, right_pos, right_neg), axis=0)\n', (6510, 6548), True, 'import numpy as np\n'), ((7342, 7391), 'numpy.array', 'np.array', (['[[1, 0, -col], [0, 1, -row], [0, 0, 1]]'], {}), '([[1, 0, -col], [0, 1, -row], [0, 0, 1]])\n', (7350, 7391), True, 'import numpy as np\n'), ((7421, 7478), 'numpy.array', 'np.array', (['[[1, 0, trans[0]], [0, 1, trans[1]], [0, 0, 1]]'], {}), '([[1, 0, trans[0]], [0, 1, trans[1]], [0, 0, 1]])\n', (7429, 7478), True, 'import numpy as np\n'), ((7501, 7543), 'numpy.matmul', 'np.matmul', (['translation_matrix', 'homo_matrix'], {}), '(translation_matrix, homo_matrix)\n', (7510, 7543), True, 'import numpy as np\n'), ((7568, 7625), 'numpy.array', 'np.array', (['[[scale[0], 0, 0], [0, scale[1], 0], [0, 0, 1]]'], {}), '([[scale[0], 0, 0], [0, scale[1], 0], [0, 0, 1]])\n', (7576, 7625), True, 'import numpy as np\n'), ((7648, 7684), 'numpy.matmul', 'np.matmul', (['scale_matrix', 'homo_matrix'], {}), '(scale_matrix, homo_matrix)\n', (7657, 7684), True, 'import numpy as np\n'), ((7704, 7717), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (7712, 7717), False, 'import math\n'), ((7736, 7749), 'math.sin', 'math.sin', (['phi'], {}), '(phi)\n', (7744, 7749), False, 'import math\n'), ((7774, 7842), 'numpy.array', 'np.array', (['[[cos_phi, sin_phi, 0], [-sin_phi, cos_phi, 0], [0, 0, 1]]'], {}), '([[cos_phi, sin_phi, 0], [-sin_phi, cos_phi, 0], [0, 0, 1]])\n', (7782, 7842), True, 'import numpy as np\n'), ((7865, 7902), 'numpy.matmul', 'np.matmul', (['rotate_matrix', 'homo_matrix'], {}), '(rotate_matrix, homo_matrix)\n', (7874, 7902), True, 'import numpy as np\n'), ((7927, 7975), 
'numpy.array', 'np.array', (['[[1, hshear, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, hshear, 0], [0, 1, 0], [0, 0, 1]])\n', (7935, 7975), True, 'import numpy as np\n'), ((7998, 8034), 'numpy.matmul', 'np.matmul', (['shear_matrix', 'homo_matrix'], {}), '(shear_matrix, homo_matrix)\n', (8007, 8034), True, 'import numpy as np\n'), ((8065, 8150), 'numpy.array', 'np.array', (['[[1, 0, (self.patch_size - 1) / 2], [0, 1, (self.patch_size - 1) / 2]]'], {}), '([[1, 0, (self.patch_size - 1) / 2], [0, 1, (self.patch_size - 1) / 2]]\n )\n', (8073, 8150), True, 'import numpy as np\n'), ((8168, 8210), 'numpy.matmul', 'np.matmul', (['translation_matrix', 'homo_matrix'], {}), '(translation_matrix, homo_matrix)\n', (8177, 8210), True, 'import numpy as np\n'), ((8226, 8294), 'cv2.warpAffine', 'cv2.warpAffine', (['src', 'homo_matrix', '(self.patch_size, self.patch_size)'], {}), '(src, homo_matrix, (self.patch_size, self.patch_size))\n', (8240, 8294), False, 'import cv2\n'), ((4320, 4352), 'numpy.random.uniform', 'np.random.uniform', (['self.scale', '(1)'], {}), '(self.scale, 1)\n', (4337, 4352), True, 'import numpy as np\n'), ((4455, 4499), 'numpy.random.uniform', 'np.random.uniform', (['(-self.hshear)', 'self.hshear'], {}), '(-self.hshear, self.hshear)\n', (4472, 4499), True, 'import numpy as np\n'), ((4627, 4712), 'numpy.random.uniform', 'np.random.uniform', (['(-self.rotate * math.pi / 180.0)', '(self.rotate * math.pi / 180.0)'], {}), '(-self.rotate * math.pi / 180.0, self.rotate * math.pi / 180.0\n )\n', (4644, 4712), True, 'import numpy as np\n'), ((4733, 4785), 'numpy.random.uniform', 'np.random.uniform', (['(-self.brightness)', 'self.brightness'], {}), '(-self.brightness, self.brightness)\n', (4750, 4785), True, 'import numpy as np\n'), ((4809, 4862), 'numpy.random.uniform', 'np.random.uniform', (['(1.0 / self.contrast)', 'self.contrast'], {}), '(1.0 / self.contrast, self.contrast)\n', (4826, 4862), True, 'import numpy as np\n'), ((1707, 1763), 'numpy.concatenate', 
'np.concatenate', (['(self.data, sample_file[dst][:])'], {'axis': '(0)'}), '((self.data, sample_file[dst][:]), axis=0)\n', (1721, 1763), True, 'import numpy as np\n'), ((4521, 4563), 'numpy.random.uniform', 'np.random.uniform', (['(-self.trans)', 'self.trans'], {}), '(-self.trans, self.trans)\n', (4538, 4563), True, 'import numpy as np\n'), ((4565, 4607), 'numpy.random.uniform', 'np.random.uniform', (['(-self.trans)', 'self.trans'], {}), '(-self.trans, self.trans)\n', (4582, 4607), True, 'import numpy as np\n'), ((5138, 5186), 'numpy.random.uniform', 'np.random.uniform', (['(-self.d_hshear)', 'self.d_hshear'], {}), '(-self.d_hshear, self.d_hshear)\n', (5155, 5186), True, 'import numpy as np\n'), ((5305, 5393), 'numpy.random.uniform', 'np.random.uniform', (['(-self.d_rotate * math.pi / 180.0)', '(self.d_rotate * math.pi / 180.0)'], {}), '(-self.d_rotate * math.pi / 180.0, self.d_rotate * math.pi /\n 180.0)\n', (5322, 5393), True, 'import numpy as np\n'), ((5429, 5485), 'numpy.random.uniform', 'np.random.uniform', (['(-self.d_brightness)', 'self.d_brightness'], {}), '(-self.d_brightness, self.d_brightness)\n', (5446, 5485), True, 'import numpy as np\n'), ((5521, 5576), 'numpy.random.uniform', 'np.random.uniform', (['(1 / self.d_contrast)', 'self.d_contrast'], {}), '(1 / self.d_contrast, self.d_contrast)\n', (5538, 5576), True, 'import numpy as np\n'), ((3775, 3813), 'numpy.random.uniform', 'np.random.uniform', (['(-self.pos)', 'self.pos'], {}), '(-self.pos, self.pos)\n', (3792, 3813), True, 'import numpy as np\n'), ((3958, 4004), 'numpy.random.uniform', 'np.random.uniform', (['self.neg_low', 'self.neg_high'], {}), '(self.neg_low, self.neg_high)\n', (3975, 4004), True, 'import numpy as np\n'), ((4160, 4210), 'numpy.random.uniform', 'np.random.uniform', (['(-self.disp_vert)', 'self.disp_vert'], {}), '(-self.disp_vert, self.disp_vert)\n', (4177, 4210), True, 'import numpy as np\n'), ((4387, 4420), 'numpy.random.uniform', 'np.random.uniform', (['self.hscale', '(1)'], 
{}), '(self.hscale, 1)\n', (4404, 4420), True, 'import numpy as np\n'), ((5060, 5095), 'numpy.random.uniform', 'np.random.uniform', (['self.d_hscale', '(1)'], {}), '(self.d_hscale, 1)\n', (5077, 5095), True, 'import numpy as np\n'), ((5230, 5278), 'numpy.random.uniform', 'np.random.uniform', (['(-self.d_vtrans)', 'self.d_vtrans'], {}), '(-self.d_vtrans, self.d_vtrans)\n', (5247, 5278), True, 'import numpy as np\n')] |
#coding:utf-8
import cv2
import json
import requests
import numpy as np
import time
import threading
import subprocess
import re
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
def get_ping_result(ip_address):
    """Ping ``ip_address`` (Windows ``ping.exe``, Chinese-locale output)
    and summarize the round-trip statistics.

    :param ip_address: host name or IP address to ping
    :return: ``[received_count, min_ms, max_ms, avg_ms]``;
        ``[0, 9999, 9999, 9999]`` when the host is unreachable
    """
    p = subprocess.Popen(["ping.exe", ip_address], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=True)
    # communicate() waits for the process and drains both pipes, avoiding
    # the potential deadlock of stdout.read() when stderr fills its buffer.
    stdout_data, _ = p.communicate()
    out = stdout_data.decode('gbk')  # Chinese Windows console output is GBK
    # Raw strings: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning on recent Python); the pattern value is unchanged.
    reg_receive = r'已接收 = \d'
    match_receive = re.search(reg_receive, out)

    receive_count = -1
    if match_receive:
        receive_count = int(match_receive.group()[6:])
    if receive_count > 0:  # at least one reply received: host reachable
        reg_min_time = r'最短 = \d+ms'
        reg_max_time = r'最长 = \d+ms'
        reg_avg_time = r'平均 = \d+ms'

        match_min_time = re.search(reg_min_time, out)
        min_time = int(match_min_time.group()[5:-2])

        match_max_time = re.search(reg_max_time, out)
        max_time = int(match_max_time.group()[5:-2])

        match_avg_time = re.search(reg_avg_time, out)
        avg_time = int(match_avg_time.group()[5:-2])

        return [receive_count, min_time, max_time, avg_time]
    else:
        print('网络不通,目标服务器不可达!')
        return [0, 9999, 9999, 9999]
class PingThread(QThread):
    """Background Qt thread that pings a host in a loop and publishes the
    average round-trip time through ``ping_signal``."""

    ping_signal = pyqtSignal(int)

    def __init__(self, ip):
        """:param ip: address to ping continuously"""
        super(PingThread, self).__init__()
        self.ping = 0  # last measured average latency (ms)
        self.flag = 1  # cleared by the owner to stop the loop
        self.ip = ip

    def run(self):
        # Keep pinging until self.flag is cleared; only the average
        # round-trip time (last element of the result) is kept.
        while self.flag:
            *_, self.ping = get_ping_result(self.ip)
            self.ping_signal.emit(self.ping)
class HttpClient(QThread):
    """Qt worker thread that POSTs the current image to an HTTP inference
    server and emits the request duration through ``http_signal``.

    NOTE(review): the response is decoded with ``eval`` on server-supplied
    text -- this is only safe with a fully trusted server (see post()).
    """

    # Emitted after every request with {"time": <duration in ms>}.
    http_signal = pyqtSignal(dict)

    def __init__(self, ip, port="8081"):
        """
        :param ip: server address
        :param port: server port (default "8081")
        """
        super(HttpClient, self).__init__()
        self.ip = ip
        self.port = port
        self.headers = {'Connection': 'keep-alive'}
        # Image to send; guarded by self.qmut since another thread sets it.
        self.img = None
        self.qmut = QMutex()
        # Set to 0 to stop the run() loop.
        self.flag = 1

    def encodeimg(self, img):
        """PNG-encode ``img`` (maximum compression) and return raw bytes."""
        encode_param = [int(cv2.IMWRITE_PNG_COMPRESSION), 9]
        result, imgencode = cv2.imencode('.png', img, encode_param)
        data = np.array(imgencode)
        data_Bytes = data.tobytes()
        return data_Bytes

    def parse_message(self):
        """Placeholder for response parsing (not implemented)."""
        pass

    def post(self):
        """Send the current image to the server; return the decoded
        location array, or return early when no image is set.
        """
        self.qmut.lock()
        if self.img is None:
            self.qmut.unlock()
            return
        # img is an ndarray, so it cannot be base64-encoded directly
        # (doing so would raise); str() of the PNG bytes is sent instead.
        res = {"image": str(self.encodeimg(self.img))}
        self.qmut.unlock()
        start_time = time.time()
        message = requests.post("http://"+self.ip+":"+self.port, headers=self.headers, data=json.dumps(res))
        duration = time.time() - start_time
        print('duration:[%.0fms]' % (duration * 1000))
        self.http_signal.emit({"time": duration*1000})
        data_Bytes = json.loads(message.text)
        # NOTE(review): eval() on data received over the network is a code
        # injection risk -- acceptable only if the server is fully trusted.
        loc = np.frombuffer(eval(data_Bytes["loc"]), dtype="float16")
        print(loc)
        return loc

    def run(self):
        # Post continuously until self.flag is cleared.
        while self.flag:
            self.post()
# if __name__ == "main":
httpclient = HttpClient(ip="10.104.0.241")
httpclient.img = cv2.imread("1.jpg")
httpclient.post()
| [
"subprocess.Popen",
"json.loads",
"json.dumps",
"time.time",
"cv2.imread",
"numpy.array",
"cv2.imencode",
"re.search"
] | [((3096, 3115), 'cv2.imread', 'cv2.imread', (['"""1.jpg"""'], {}), "('1.jpg')\n", (3106, 3115), False, 'import cv2\n'), ((255, 385), 'subprocess.Popen', 'subprocess.Popen', (["['ping.exe', ip_address]"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'shell': '(True)'}), "(['ping.exe', ip_address], stdin=subprocess.PIPE, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n", (271, 385), False, 'import subprocess\n'), ((496, 523), 're.search', 're.search', (['reg_receive', 'out'], {}), '(reg_receive, out)\n', (505, 523), False, 'import re\n'), ((806, 834), 're.search', 're.search', (['reg_min_time', 'out'], {}), '(reg_min_time, out)\n', (815, 834), False, 'import re\n'), ((914, 942), 're.search', 're.search', (['reg_max_time', 'out'], {}), '(reg_max_time, out)\n', (923, 942), False, 'import re\n'), ((1022, 1050), 're.search', 're.search', (['reg_avg_time', 'out'], {}), '(reg_avg_time, out)\n', (1031, 1050), False, 'import re\n'), ((2067, 2106), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'img', 'encode_param'], {}), "('.png', img, encode_param)\n", (2079, 2106), False, 'import cv2\n'), ((2122, 2141), 'numpy.array', 'np.array', (['imgencode'], {}), '(imgencode)\n', (2130, 2141), True, 'import numpy as np\n'), ((2512, 2523), 'time.time', 'time.time', ([], {}), '()\n', (2521, 2523), False, 'import time\n'), ((2808, 2832), 'json.loads', 'json.loads', (['message.text'], {}), '(message.text)\n', (2818, 2832), False, 'import json\n'), ((2652, 2663), 'time.time', 'time.time', ([], {}), '()\n', (2661, 2663), False, 'import time\n'), ((2616, 2631), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (2626, 2631), False, 'import json\n')] |
# import gym
import numpy as np
import random
import tensorflow as tf
import tensorflow.contrib.slim as slim
import matplotlib.pyplot as plt
import scipy.misc
import os
import time
from gridmap import Map
env = Map(7)
class Qnetwork():
def __init__(self, h_size, rnn_cell, myScope, lr):
self.scalarInput = tf.placeholder(shape=[None,21168],dtype=tf.float32)
self.imageIn = tf.reshape(self.scalarInput,shape=[-1,84,84,3])
self.conv1 = slim.convolution2d( \
inputs=self.imageIn,num_outputs=32,\
kernel_size=[8,8],stride=[4,4],padding='VALID', \
biases_initializer=None,scope=myScope+'_conv1')
self.conv2 = slim.convolution2d( \
inputs=self.conv1,num_outputs=64,\
kernel_size=[4,4],stride=[2,2],padding='VALID', \
biases_initializer=None,scope=myScope+'_conv2')
self.conv3 = slim.convolution2d( \
inputs=self.conv2,num_outputs=64,\
kernel_size=[3,3],stride=[1,1],padding='VALID', \
biases_initializer=None,scope=myScope+'_conv3')
self.conv4 = slim.convolution2d( \
inputs=self.conv3,num_outputs=h_size,\
kernel_size=[7,7],stride=[1,1],padding='VALID', \
biases_initializer=None,scope=myScope+'_conv4')
self.trainLength = tf.placeholder(dtype=tf.int32)
#We take the output from the final convolutional layer and send it to a recurrent layer.
#The input must be reshaped into [batch x trace x units] for rnn processing,
#and then returned to [batch x units] when sent through the upper levles.
self.batch_size = tf.placeholder(dtype=tf.int32,shape=[])
self.convFlat = tf.reshape(slim.flatten(self.conv4),[self.batch_size,self.trainLength,h_size])
self.state_in = rnn_cell.zero_state(self.batch_size, tf.float32)
self.rnn,self.rnn_state = tf.nn.dynamic_rnn(\
inputs=self.convFlat,cell=rnn_cell,dtype=tf.float32,initial_state=self.state_in,scope=myScope+'_rnn')
self.rnn = tf.reshape(self.rnn,shape=[-1,h_size])
#The output from the recurrent player is then split into separate Value and Advantage streams
self.streamA,self.streamV = tf.split(self.rnn,2,1)
self.AW = tf.Variable(tf.random_normal([h_size//2,4]))
self.VW = tf.Variable(tf.random_normal([h_size//2,1]))
self.Advantage = tf.matmul(self.streamA,self.AW)
self.Value = tf.matmul(self.streamV,self.VW)
self.salience = tf.gradients(self.Advantage,self.imageIn)
#Then combine them together to get our final Q-values.
self.Qout = self.Value + tf.subtract(self.Advantage,tf.reduce_mean(self.Advantage,axis=1,keep_dims=True))
self.predict = tf.argmax(self.Qout,1)
#Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
self.targetQ = tf.placeholder(shape=[None],dtype=tf.float32)
self.actions = tf.placeholder(shape=[None],dtype=tf.int32)
self.actions_onehot = tf.one_hot(self.actions,4,dtype=tf.float32)
self.Q = tf.reduce_sum(tf.multiply(self.Qout, self.actions_onehot), axis=1)
self.td_error = tf.square(self.targetQ - self.Q)
#In order to only propogate accurate gradients through the network, we will mask the first
#half of the losses for each trace as per Lample & Chatlot 2016
self.maskA = tf.zeros([self.batch_size,self.trainLength//2])
self.maskB = tf.ones([self.batch_size,self.trainLength//2])
self.mask = tf.concat([self.maskA,self.maskB],1)
self.mask = tf.reshape(self.mask,[-1])
self.loss = tf.reduce_mean(self.td_error * self.mask)
self.trainer = tf.train.AdamOptimizer(learning_rate=lr)
self.updateModel = self.trainer.minimize(self.loss)
class experience_buffer():
def __init__(self, buffer_size=1000):
self.buffer = []
self.buffer_size = buffer_size
def add(self,experience):
if len(self.buffer) + 1 >= self.buffer_size:
self.buffer[0:(1+len(self.buffer))-self.buffer_size] = []
self.buffer.append(experience)
def sample(self,batch_size,trace_length):
sampled_episodes = random.sample(self.buffer,batch_size)
sampledTraces = []
for episode in sampled_episodes:
point = np.random.randint(0,len(episode)+1-trace_length)
sampledTraces.append(episode[point:point+trace_length])
sampledTraces = np.array(sampledTraces)
return np.reshape(sampledTraces,[batch_size*trace_length,4])
def processState(states):
return np.reshape(states, [21168])
def updateTargetGraph(tfVars, sess):
total_vars = len(tfVars)
op_holder = []
for idx, var in enumerate(tfVars[0:total_vars // 2]):
op_holder.append(tfVars[idx+total_vars//2].assign((var.value()*tau) + ((1-tau)*tfVars[idx+total_vars//2].value())))
return op_holder
def updateTarget(op_holder,sess):
for op in op_holder:
sess.run(op)
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward """
gamma = 0.99
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
lr = 0.0001
#Setting the training parameters
batch_size = 4 #How many experience traces to use for each training step.
trace_length = 8 #How long each experience trace will be when training
update_freq = 5 #How often to perform a training step.
y = .99 #Discount factor on the target Q-values
startE = 1 #Starting chance of random action
endE = 0.1 #Final chance of random action
anneling_steps = 10000 #How many steps of training to reduce startE to endE.
num_episodes = 10000 #How many episodes of game environment to train network with.
pre_train_steps = 10000 #How many steps of random actions before training begins.
load_model = False #Whether to load a saved model.
path = "./drqn" #The path to save our model to.
h_size = 512 #The size of the final convolutional layer before splitting it into Advantage and Value streams.
max_epLength = 50 #The max allowed length of our episode.
tau = 0.001 # Rate to update target network toward primary networktf.reset_default_graph()
if not os.path.exists(path):
os.makedirs(path)
tf.reset_default_graph()
#We define the cells for the primary and target q-networks
cell = tf.contrib.rnn.BasicLSTMCell(num_units=h_size,state_is_tuple=True)
cellT = tf.contrib.rnn.BasicLSTMCell(num_units=h_size,state_is_tuple=True)
mainQN = Qnetwork(h_size,cell,'main', lr)
targetQN = Qnetwork(h_size,cellT,'target', lr)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
trainables = tf.trainable_variables()
targetOps = updateTargetGraph(trainables, tau)
myBuffer = experience_buffer(50000)
e = startE
stepDrop = (startE - endE) / anneling_steps
rList = []
total_steps = 0
with tf.Session() as sess:
sess.run(init)
if load_model == True:
print('Loading Model...')
ckpt = tf.train.get_checkpoint_state(path)
saver.restore(sess, ckpt.model_checkpoint_path)
updateTarget(targetOps,sess)
s = env.reset()
s = processState(s)
for i in range(num_episodes):
episodeBuffer = experience_buffer()
# s = env.reset()
# s = processState(s)
rAll = 0
j = 0
state = (np.zeros([1,h_size]),np.zeros([1,h_size]))
# The Q-Network
# If the agent takes longer than max_epLength moves to reach either of the blocks, end the trial.
while j < max_epLength:
j += 1
#Choose an action by greedily (with e chance of random action) from the Q-network
if np.random.rand(1) < e or total_steps < pre_train_steps:
a = np.random.randint(0, env.actions)
state1 = sess.run(mainQN.rnn_state,\
feed_dict={mainQN.scalarInput:[s/255.0],mainQN.trainLength:1,mainQN.state_in:state,mainQN.batch_size:1})
else:
a, state1 = sess.run([mainQN.predict,mainQN.rnn_state],\
feed_dict={mainQN.scalarInput:[s/255.0],mainQN.trainLength:1,mainQN.state_in:state,mainQN.batch_size:1})
a = a[0]
s1, r = env.step(a)
s1 = processState(s1)
total_steps += 1
# Save the experience to our episode buffer.
episodeBuffer.add(np.reshape(np.array([s, a, r, s1]), [1, 4]))
if total_steps > pre_train_steps:
if e > endE:
e -= stepDrop
if total_steps % (update_freq) == 0:
updateTarget(targetOps,sess)
#Reset the recurrent layer's hidden state
state_train = (np.zeros([batch_size,h_size]),np.zeros([batch_size,h_size]))
trainBatch = myBuffer.sample(batch_size,trace_length) #Get a random batch of experiences.
#Below we perform the Double-DQN update to the target Q-values
Q1 = sess.run(mainQN.predict,feed_dict={\
mainQN.scalarInput:np.vstack(trainBatch[:,3]/255.0),\
mainQN.trainLength:trace_length,mainQN.state_in:state_train,mainQN.batch_size:batch_size})
Q2 = sess.run(targetQN.Qout,feed_dict={\
targetQN.scalarInput:np.vstack(trainBatch[:,3]/255.0),\
targetQN.trainLength:trace_length,targetQN.state_in:state_train,targetQN.batch_size:batch_size})
doubleQ = Q2[range(batch_size*trace_length),Q1]
targetQ = trainBatch[:,2] + (y*doubleQ)
#Update the network with our target values.
sess.run(mainQN.updateModel, \
feed_dict={mainQN.scalarInput:np.vstack(trainBatch[:,0]/255.0),mainQN.targetQ:targetQ,\
mainQN.actions:trainBatch[:,1],mainQN.trainLength:trace_length,\
mainQN.state_in:state_train,mainQN.batch_size:batch_size})
rAll += r
s = s1
state = state1
#Add the discounted experiences to our experience buffer.
myBuffer.add(episodeBuffer.buffer)
rList.append(rAll)
#Periodically save the model.
if i % 100 == 0:
saver.save(sess, path + '/model-' + str(i) + '.ckpt')
print("Saved Model")
if len(rList) % 10 == 0:
print(str(i), np.mean(rList[-10:]), e)
saver.save(sess, path + '/model-' + str(i) + '.ckpt')
print("Percent of succesful episodes: " + str(sum(rList) / num_episodes) + "%")
| [
"tensorflow.trainable_variables",
"random.sample",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.multiply",
"numpy.random.randint",
"numpy.mean",
"tensorflow.split",
"numpy.zeros_like",
"tensorflow.one_hot",
"os.path.exists",
"tensorflow.concat",
... | [((212, 218), 'gridmap.Map', 'Map', (['(7)'], {}), '(7)\n', (215, 218), False, 'from gridmap import Map\n'), ((6475, 6499), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (6497, 6499), True, 'import tensorflow as tf\n'), ((6566, 6633), 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', ([], {'num_units': 'h_size', 'state_is_tuple': '(True)'}), '(num_units=h_size, state_is_tuple=True)\n', (6594, 6633), True, 'import tensorflow as tf\n'), ((6641, 6708), 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', ([], {'num_units': 'h_size', 'state_is_tuple': '(True)'}), '(num_units=h_size, state_is_tuple=True)\n', (6669, 6708), True, 'import tensorflow as tf\n'), ((6805, 6838), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6836, 6838), True, 'import tensorflow as tf\n'), ((6847, 6863), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6861, 6863), True, 'import tensorflow as tf\n'), ((6877, 6901), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (6899, 6901), True, 'import tensorflow as tf\n'), ((4718, 4745), 'numpy.reshape', 'np.reshape', (['states', '[21168]'], {}), '(states, [21168])\n', (4728, 4745), True, 'import numpy as np\n'), ((5252, 5268), 'numpy.zeros_like', 'np.zeros_like', (['r'], {}), '(r)\n', (5265, 5268), True, 'import numpy as np\n'), ((6430, 6450), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6444, 6450), False, 'import os\n'), ((6456, 6473), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6467, 6473), False, 'import os\n'), ((7075, 7087), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7085, 7087), True, 'import tensorflow as tf\n'), ((321, 374), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None, 21168]', 'dtype': 'tf.float32'}), '(shape=[None, 21168], dtype=tf.float32)\n', (335, 374), True, 'import tensorflow as tf\n'), ((396, 447), 
'tensorflow.reshape', 'tf.reshape', (['self.scalarInput'], {'shape': '[-1, 84, 84, 3]'}), '(self.scalarInput, shape=[-1, 84, 84, 3])\n', (406, 447), True, 'import tensorflow as tf\n'), ((465, 631), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', ([], {'inputs': 'self.imageIn', 'num_outputs': '(32)', 'kernel_size': '[8, 8]', 'stride': '[4, 4]', 'padding': '"""VALID"""', 'biases_initializer': 'None', 'scope': "(myScope + '_conv1')"}), "(inputs=self.imageIn, num_outputs=32, kernel_size=[8, 8],\n stride=[4, 4], padding='VALID', biases_initializer=None, scope=myScope +\n '_conv1')\n", (483, 631), True, 'import tensorflow.contrib.slim as slim\n'), ((679, 843), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', ([], {'inputs': 'self.conv1', 'num_outputs': '(64)', 'kernel_size': '[4, 4]', 'stride': '[2, 2]', 'padding': '"""VALID"""', 'biases_initializer': 'None', 'scope': "(myScope + '_conv2')"}), "(inputs=self.conv1, num_outputs=64, kernel_size=[4, 4],\n stride=[2, 2], padding='VALID', biases_initializer=None, scope=myScope +\n '_conv2')\n", (697, 843), True, 'import tensorflow.contrib.slim as slim\n'), ((891, 1055), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', ([], {'inputs': 'self.conv2', 'num_outputs': '(64)', 'kernel_size': '[3, 3]', 'stride': '[1, 1]', 'padding': '"""VALID"""', 'biases_initializer': 'None', 'scope': "(myScope + '_conv3')"}), "(inputs=self.conv2, num_outputs=64, kernel_size=[3, 3],\n stride=[1, 1], padding='VALID', biases_initializer=None, scope=myScope +\n '_conv3')\n", (909, 1055), True, 'import tensorflow.contrib.slim as slim\n'), ((1103, 1273), 'tensorflow.contrib.slim.convolution2d', 'slim.convolution2d', ([], {'inputs': 'self.conv3', 'num_outputs': 'h_size', 'kernel_size': '[7, 7]', 'stride': '[1, 1]', 'padding': '"""VALID"""', 'biases_initializer': 'None', 'scope': "(myScope + '_conv4')"}), "(inputs=self.conv3, num_outputs=h_size, kernel_size=[7, 7\n ], stride=[1, 1], padding='VALID', 
biases_initializer=None, scope=\n myScope + '_conv4')\n", (1121, 1273), True, 'import tensorflow.contrib.slim as slim\n'), ((1326, 1356), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32'}), '(dtype=tf.int32)\n', (1340, 1356), True, 'import tensorflow as tf\n'), ((1648, 1688), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[]'}), '(dtype=tf.int32, shape=[])\n', (1662, 1688), True, 'import tensorflow as tf\n'), ((1898, 2027), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'inputs': 'self.convFlat', 'cell': 'rnn_cell', 'dtype': 'tf.float32', 'initial_state': 'self.state_in', 'scope': "(myScope + '_rnn')"}), "(inputs=self.convFlat, cell=rnn_cell, dtype=tf.float32,\n initial_state=self.state_in, scope=myScope + '_rnn')\n", (1915, 2027), True, 'import tensorflow as tf\n'), ((2055, 2095), 'tensorflow.reshape', 'tf.reshape', (['self.rnn'], {'shape': '[-1, h_size]'}), '(self.rnn, shape=[-1, h_size])\n', (2065, 2095), True, 'import tensorflow as tf\n'), ((2232, 2256), 'tensorflow.split', 'tf.split', (['self.rnn', '(2)', '(1)'], {}), '(self.rnn, 2, 1)\n', (2240, 2256), True, 'import tensorflow as tf\n'), ((2406, 2438), 'tensorflow.matmul', 'tf.matmul', (['self.streamA', 'self.AW'], {}), '(self.streamA, self.AW)\n', (2415, 2438), True, 'import tensorflow as tf\n'), ((2459, 2491), 'tensorflow.matmul', 'tf.matmul', (['self.streamV', 'self.VW'], {}), '(self.streamV, self.VW)\n', (2468, 2491), True, 'import tensorflow as tf\n'), ((2524, 2566), 'tensorflow.gradients', 'tf.gradients', (['self.Advantage', 'self.imageIn'], {}), '(self.Advantage, self.imageIn)\n', (2536, 2566), True, 'import tensorflow as tf\n'), ((2766, 2789), 'tensorflow.argmax', 'tf.argmax', (['self.Qout', '(1)'], {}), '(self.Qout, 1)\n', (2775, 2789), True, 'import tensorflow as tf\n'), ((2939, 2985), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None]', 'dtype': 'tf.float32'}), '(shape=[None], dtype=tf.float32)\n', (2953, 2985), True, 
'import tensorflow as tf\n'), ((3008, 3052), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[None]', 'dtype': 'tf.int32'}), '(shape=[None], dtype=tf.int32)\n', (3022, 3052), True, 'import tensorflow as tf\n'), ((3082, 3127), 'tensorflow.one_hot', 'tf.one_hot', (['self.actions', '(4)'], {'dtype': 'tf.float32'}), '(self.actions, 4, dtype=tf.float32)\n', (3092, 3127), True, 'import tensorflow as tf\n'), ((3252, 3284), 'tensorflow.square', 'tf.square', (['(self.targetQ - self.Q)'], {}), '(self.targetQ - self.Q)\n', (3261, 3284), True, 'import tensorflow as tf\n'), ((3486, 3536), 'tensorflow.zeros', 'tf.zeros', (['[self.batch_size, self.trainLength // 2]'], {}), '([self.batch_size, self.trainLength // 2])\n', (3494, 3536), True, 'import tensorflow as tf\n'), ((3555, 3604), 'tensorflow.ones', 'tf.ones', (['[self.batch_size, self.trainLength // 2]'], {}), '([self.batch_size, self.trainLength // 2])\n', (3562, 3604), True, 'import tensorflow as tf\n'), ((3622, 3660), 'tensorflow.concat', 'tf.concat', (['[self.maskA, self.maskB]', '(1)'], {}), '([self.maskA, self.maskB], 1)\n', (3631, 3660), True, 'import tensorflow as tf\n'), ((3679, 3706), 'tensorflow.reshape', 'tf.reshape', (['self.mask', '[-1]'], {}), '(self.mask, [-1])\n', (3689, 3706), True, 'import tensorflow as tf\n'), ((3726, 3767), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(self.td_error * self.mask)'], {}), '(self.td_error * self.mask)\n', (3740, 3767), True, 'import tensorflow as tf\n'), ((3800, 3840), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (3822, 3840), True, 'import tensorflow as tf\n'), ((4319, 4357), 'random.sample', 'random.sample', (['self.buffer', 'batch_size'], {}), '(self.buffer, batch_size)\n', (4332, 4357), False, 'import random\n'), ((4586, 4609), 'numpy.array', 'np.array', (['sampledTraces'], {}), '(sampledTraces)\n', (4594, 4609), True, 'import numpy as np\n'), ((4625, 4682), 'numpy.reshape', 'np.reshape', 
(['sampledTraces', '[batch_size * trace_length, 4]'], {}), '(sampledTraces, [batch_size * trace_length, 4])\n', (4635, 4682), True, 'import numpy as np\n'), ((7192, 7227), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['path'], {}), '(path)\n', (7221, 7227), True, 'import tensorflow as tf\n'), ((1723, 1747), 'tensorflow.contrib.slim.flatten', 'slim.flatten', (['self.conv4'], {}), '(self.conv4)\n', (1735, 1747), True, 'import tensorflow.contrib.slim as slim\n'), ((2285, 2319), 'tensorflow.random_normal', 'tf.random_normal', (['[h_size // 2, 4]'], {}), '([h_size // 2, 4])\n', (2301, 2319), True, 'import tensorflow as tf\n'), ((2348, 2382), 'tensorflow.random_normal', 'tf.random_normal', (['[h_size // 2, 1]'], {}), '([h_size // 2, 1])\n', (2364, 2382), True, 'import tensorflow as tf\n'), ((3166, 3209), 'tensorflow.multiply', 'tf.multiply', (['self.Qout', 'self.actions_onehot'], {}), '(self.Qout, self.actions_onehot)\n', (3177, 3209), True, 'import tensorflow as tf\n'), ((7544, 7565), 'numpy.zeros', 'np.zeros', (['[1, h_size]'], {}), '([1, h_size])\n', (7552, 7565), True, 'import numpy as np\n'), ((7565, 7586), 'numpy.zeros', 'np.zeros', (['[1, h_size]'], {}), '([1, h_size])\n', (7573, 7586), True, 'import numpy as np\n'), ((2689, 2743), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.Advantage'], {'axis': '(1)', 'keep_dims': '(True)'}), '(self.Advantage, axis=1, keep_dims=True)\n', (2703, 2743), True, 'import tensorflow as tf\n'), ((7954, 7987), 'numpy.random.randint', 'np.random.randint', (['(0)', 'env.actions'], {}), '(0, env.actions)\n', (7971, 7987), True, 'import numpy as np\n'), ((10721, 10741), 'numpy.mean', 'np.mean', (['rList[-10:]'], {}), '(rList[-10:])\n', (10728, 10741), True, 'import numpy as np\n'), ((7878, 7895), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (7892, 7895), True, 'import numpy as np\n'), ((8601, 8624), 'numpy.array', 'np.array', (['[s, a, r, s1]'], {}), '([s, a, r, s1])\n', (8609, 8624), 
True, 'import numpy as np\n'), ((8945, 8975), 'numpy.zeros', 'np.zeros', (['[batch_size, h_size]'], {}), '([batch_size, h_size])\n', (8953, 8975), True, 'import numpy as np\n'), ((8975, 9005), 'numpy.zeros', 'np.zeros', (['[batch_size, h_size]'], {}), '([batch_size, h_size])\n', (8983, 9005), True, 'import numpy as np\n'), ((9326, 9361), 'numpy.vstack', 'np.vstack', (['(trainBatch[:, 3] / 255.0)'], {}), '(trainBatch[:, 3] / 255.0)\n', (9335, 9361), True, 'import numpy as np\n'), ((9582, 9617), 'numpy.vstack', 'np.vstack', (['(trainBatch[:, 3] / 255.0)'], {}), '(trainBatch[:, 3] / 255.0)\n', (9591, 9617), True, 'import numpy as np\n'), ((10056, 10091), 'numpy.vstack', 'np.vstack', (['(trainBatch[:, 0] / 255.0)'], {}), '(trainBatch[:, 0] / 255.0)\n', (10065, 10091), True, 'import numpy as np\n')] |
import numpy as np
from sys import stdin
import functools as fntls
import operator as op
def step(g, alg, stepnum):
h = len(g)
w = len(g[0])
res = g.copy()
for i in range(1, h - 1):
for j in range(1, w - 1):
lookup = int(''.join(
map(str, map(int, g[i - 1:i + 2, j - 1:j + 2].flatten()))),
base=2)
res[i, j] = alg[lookup]
res = res[1:-1, 1:-1]
res = np.pad(res, 2, constant_values=stepnum % 2 == 0)
return res
def solve():
gs = groups()
algo = list(map(fntls.partial(op.eq, '#'), gs[0][0]))
g = np.array([list(map(fntls.partial(op.eq, '#'), l)) for l in gs[1]])
g = np.pad(g, 2)
for i in range(50):
g = step(g, algo, i)
print(sum(g.flatten()))
def groups():
return [g.split('\n') for g in stdin.read().strip().split('\n\n')]
if __name__ == '__main__':
solve()
| [
"numpy.pad",
"functools.partial",
"sys.stdin.read"
] | [((452, 500), 'numpy.pad', 'np.pad', (['res', '(2)'], {'constant_values': '(stepnum % 2 == 0)'}), '(res, 2, constant_values=stepnum % 2 == 0)\n', (458, 500), True, 'import numpy as np\n'), ((692, 704), 'numpy.pad', 'np.pad', (['g', '(2)'], {}), '(g, 2)\n', (698, 704), True, 'import numpy as np\n'), ((570, 595), 'functools.partial', 'fntls.partial', (['op.eq', '"""#"""'], {}), "(op.eq, '#')\n", (583, 595), True, 'import functools as fntls\n'), ((636, 661), 'functools.partial', 'fntls.partial', (['op.eq', '"""#"""'], {}), "(op.eq, '#')\n", (649, 661), True, 'import functools as fntls\n'), ((839, 851), 'sys.stdin.read', 'stdin.read', ([], {}), '()\n', (849, 851), False, 'from sys import stdin\n')] |
'print the most likely path of all the utterances of a dataset'
import argparse
import os
import pickle
import sys
import numpy as np
import beer
EPS = 1e-5
def setup(parser):
parser.add_argument('-S', '--state', action='store_true',
help='state level posteriors')
parser.add_argument('-l', '--log', action='store_true',
help='log domain')
parser.add_argument('-s', '--acoustic-scale', default=1., type=float,
help='scaling factor of the acoustic model')
parser.add_argument('-u', '--utts',
help='decode the given utterances ("-") for stdin')
parser.add_argument('model', help='hmm based model')
parser.add_argument('dataset', help='training data set')
parser.add_argument('outdir', help='output directory')
def state2phone(posts, start_pdf, end_pdf):
retval = np.zeros((len(posts), len(start_pdf)))
for i, unit in enumerate(start_pdf):
start, end = start_pdf[unit], end_pdf[unit]
retval[:, i] = posts[:, start:end].sum(axis=-1)
return retval
def main(args, logger):
logger.debug('load the model')
with open(args.model, 'rb') as f:
model = pickle.load(f)
logger.debug('load the dataset')
with open(args.dataset, 'rb') as f:
dataset = pickle.load(f)
if args.utts:
if args.utts == '-':
utts = [line.strip().split()[0] for line in sys.stdin.readlines()]
else:
with open(args.utts, 'r') as f:
utts = [line.strip().split()[0] for line in f.readlines()]
else:
utts = list([utt.id for utt in dataset.utterances(random_order=False)])
count = 0
for uttname in utts:
try:
utt = dataset[uttname]
except KeyError as err:
logger.warning(f'no data for utterance {uttname}')
continue
logger.debug(f'processing utterance: {utt.id}')
posts = model.posteriors(utt.features, scale=args.acoustic_scale)
posts = posts.detach().numpy()
if not args.state:
posts = state2phone(posts, model.start_pdf, model.end_pdf)
if args.log:
posts = np.log(EPS + posts)
path = os.path.join(args.outdir, f'{uttname}.npy')
np.save(path, posts)
count += 1
logger.info(f'successfully computed the posteriors for {count} utterances.')
if __name__ == "__main__":
main()
| [
"numpy.save",
"numpy.log",
"pickle.load",
"sys.stdin.readlines",
"os.path.join"
] | [((1219, 1233), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1230, 1233), False, 'import pickle\n'), ((1330, 1344), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1341, 1344), False, 'import pickle\n'), ((2242, 2285), 'os.path.join', 'os.path.join', (['args.outdir', 'f"""{uttname}.npy"""'], {}), "(args.outdir, f'{uttname}.npy')\n", (2254, 2285), False, 'import os\n'), ((2294, 2314), 'numpy.save', 'np.save', (['path', 'posts'], {}), '(path, posts)\n', (2301, 2314), True, 'import numpy as np\n'), ((2207, 2226), 'numpy.log', 'np.log', (['(EPS + posts)'], {}), '(EPS + posts)\n', (2213, 2226), True, 'import numpy as np\n'), ((1449, 1470), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (1468, 1470), False, 'import sys\n')] |
# here i take all walkers and do trace plots, corner plots and histograms
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson, norm, bernoulli, expon, uniform, beta, gamma, multinomial, multivariate_normal
from scipy.stats import rv_histogram
from scipy.special import digamma
import random
from scipy.special import gamma as gamma_function
from scipy.special import gammaln
from scipy.special import factorial
from scipy.special import beta as beta_function
from sklearn.preprocessing import OneHotEncoder
from scipy.stats import dirichlet
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, roc_curve
import corner
import necessary_functions as nf
from emcee import autocorr
data_dir = sys.argv[1]
nwalkers = int(sys.argv[2])
N=int(sys.argv[3])
T=int(sys.argv[4])
burnout=int(sys.argv[5])
keep_every=int(sys.argv[6])
Nprior=10
data = np.loadtxt(data_dir+'/processed_data.dat')
data_smeared = np.loadtxt(data_dir+'/processed_data_smeared.dat')
labels=data[:,2]
f1=np.sum(labels==1)/len(labels)
ohe_nj=OneHotEncoder(handle_unknown='error')
ohe_nb=OneHotEncoder(handle_unknown='error')
Y1=ohe_nj.fit_transform(data[:,0].reshape(-1,1)).toarray()
Y2=ohe_nb.fit_transform(data[:,1].reshape(-1,1)).toarray()
X=[]
for n in range(Y1.shape[0]):
X.append([Y1[n],Y2[n]])
true_alphas=np.zeros((2,Y1.shape[1]))
true_betas=np.zeros((2,Y2.shape[1]))
for k in range(2):
true_alphas[k]=np.mean(Y1[labels==k],axis=0)
true_betas[k]=np.mean(Y2[labels==k],axis=0)
Y1_smeared=ohe_nj.transform(data_smeared[:,0].reshape(-1,1)).toarray()
Y2_smeared=ohe_nb.transform(data_smeared[:,1].reshape(-1,1)).toarray()
fake_alphas=np.zeros((2,Y1.shape[1]))
fake_betas=np.zeros((2,Y2.shape[1]))
for k in range(2):
fake_alphas[k]=np.mean(Y1_smeared[data_smeared[:,2]==k],axis=0)
fake_betas[k]=np.mean(Y2_smeared[data_smeared[:,2]==k],axis=0)
K=true_alphas.shape[0]
dj=true_alphas.shape[1]
db=true_betas.shape[1]
Z_list=np.zeros((nwalkers,T,N,K))
pie_list=np.zeros((nwalkers,T,K))
alphas_list=np.zeros((nwalkers,T,K,dj))
betas_list=np.zeros((nwalkers,T,K,db))
pie_prior_list=np.zeros((nwalkers*T,K))
alphas_prior_list=np.zeros((nwalkers*T,K,dj))
betas_prior_list=np.zeros((nwalkers*T,K,db))
for walker in range(nwalkers):
Z_list[walker]=np.load(data_dir+'/walker_'+str(walker+1)+'/Z_list.npy')
pie_list[walker]=np.load(data_dir+'/walker_'+str(walker+1)+'/pie_list.npy')
alphas_list[walker]=np.load(data_dir+'/walker_'+str(walker+1)+'/alphas_list.npy')
betas_list[walker]=np.load(data_dir+'/walker_'+str(walker+1)+'/betas_list.npy')
pie_list_all_walkers=np.zeros((nwalkers*T,K))
alphas_list_all_walkers=np.zeros((nwalkers*T,K,dj))
betas_list_all_walkers=np.zeros((nwalkers*T,K,db))
# Flatten the per-walker MCMC chains into single (nwalkers*T, ...) sample arrays.
for walker in range(nwalkers):
    pie_list_all_walkers[walker*T:(walker+1)*T]=pie_list[walker]
    alphas_list_all_walkers[walker*T:(walker+1)*T]=alphas_list[walker]
    betas_list_all_walkers[walker*T:(walker+1)*T]=betas_list[walker]
# Draw the same number of samples from the Dirichlet priors, to compare
# prior vs. posterior integrated autocorrelation times below.
pie_prior_list=dirichlet.rvs(size=nwalkers*T,alpha=np.ones(2))
alphas_prior_list[:,0,:]=dirichlet.rvs(size=nwalkers*T,alpha=Nprior*fake_alphas[0])
alphas_prior_list[:,1,:]=dirichlet.rvs(size=nwalkers*T,alpha=Nprior*fake_alphas[1])
betas_prior_list[:,0,:]=dirichlet.rvs(size=nwalkers*T,alpha=Nprior*fake_betas[0])
# Bug fix: the class-1 betas prior was sampled from fake_betas[0] (copy-paste error).
betas_prior_list[:,1,:]=dirichlet.rvs(size=nwalkers*T,alpha=Nprior*fake_betas[1])
# One autocorrelation time per free parameter: K mixture weights plus
# K*dj alpha components and K*db beta components.
dim=K+K*dj+K*db
autocorrelations=np.zeros(dim)
# NOTE(review): only entries 0 and 1 are ever assigned; the alpha/beta
# entries written to the README below (autocorrelations[K+j], ...) are
# therefore always 0 — confirm whether they should be computed here too.
autocorrelations[0]=autocorr.integrated_time(pie_list_all_walkers[:,0].reshape(-1,1),tol=1000)
autocorrelations[1]=autocorr.integrated_time(pie_list_all_walkers[:,1].reshape(-1,1),tol=1000)
README=open(data_dir +'/README.txt',"wt")
# Bug fix: the pi0 row previously reported the prior tau of component 1
# (pie_prior_list[:,1]) instead of component 0.
README.write('pi0: Prior %8.3f \t Posterior %8.3f \n'% (autocorr.integrated_time(pie_prior_list[:,0].reshape(-1,1),tol=1000),autocorr.integrated_time(pie_list_all_walkers[:,0].reshape(-1,1),tol=1000)))
README.write('pi1: Prior %8.3f \t Posterior %8.3f \n'% (autocorr.integrated_time(pie_prior_list[:,1].reshape(-1,1),tol=1000),autocorr.integrated_time(pie_list_all_walkers[:,1].reshape(-1,1),tol=1000)))
for j in range(dj):
    README.write('alpha0%d\t: Prior %8.3f \t Posterior %8.3f \n'% (j,autocorr.integrated_time(alphas_prior_list[:,0,j].reshape(-1,1),tol=1000),autocorr.integrated_time(alphas_list_all_walkers[:,0,j].reshape(-1,1),tol=1000)))
    README.write('alpha1%d\t: Prior %8.3f \t Posterior %8.3f \n'% (j,autocorr.integrated_time(alphas_prior_list[:,1,j].reshape(-1,1),tol=1000),autocorr.integrated_time(alphas_list_all_walkers[:,1,j].reshape(-1,1),tol=1000)))
for b in range(db):
    README.write('beta0%d\t: Prior %8.3f \t Posterior %8.3f \n'% (b,autocorr.integrated_time(betas_prior_list[:,0,b].reshape(-1,1),tol=1000),autocorr.integrated_time(betas_list_all_walkers[:,0,b].reshape(-1,1),tol=1000)))
    README.write('beta1%d\t: Prior %8.3f \t Posterior %8.3f \n'% (b,autocorr.integrated_time(betas_prior_list[:,1,b].reshape(-1,1),tol=1000),autocorr.integrated_time(betas_list_all_walkers[:,1,b].reshape(-1,1),tol=1000)))
README.close()
# Convergence check: tau estimate as a function of the number of samples N
# (log-spaced from 100 up to the full flattened chain length).
Nbis = np.exp(np.linspace(np.log(100), np.log(nwalkers*T), 10)).astype(int)
new = np.empty(len(Nbis))
for i, n in enumerate(Nbis):
    # NOTE(review): `n` is unused here, so every point of the plot below is
    # identical; this probably intended to use only the first n samples
    # (e.g. pie_list_all_walkers[:n,1]) — confirm before changing, since
    # integrated_time with tol=1000 may reject short chains.
    new[i] = autocorr.integrated_time(pie_list[:,1].reshape(-1,1),tol=1000)
# Plot the comparisons
plt.loglog(Nbis, new, "o-", label="new")
ylim = plt.gca().get_ylim()
plt.plot(Nbis, Nbis / 1000.0, "--k", label=r"$\tau = N/1000$")
plt.ylim(ylim)
plt.xlabel("number of samples, $N$")
plt.ylabel(r"$\tau$ estimates for $\pi_{1}$")
plt.legend(fontsize=14)
plt.savefig(data_dir+'/pi1_tau_estimates.pdf')
plt.savefig(data_dir+'/pi1_tau_estimates.png')
# here I thin the files automatically, using the largest estimated tau
tau_max=int(round(np.max(autocorrelations),0))
README=open(data_dir +'/README.txt',"a")
README.write('Thinned again with tau %d \n'% tau_max)
README.write('pi0: Posterior %8.3f \t Thinned Posterior %8.3f \n'% (autocorrelations[0],nf.autocorr_new(nf.thin_a_sample(pie_list[:,:,0],tau_max))))
README.write('pi1: Posterior %8.3f \t Thinned Posterior %8.3f \n'% (autocorrelations[1],nf.autocorr_new(nf.thin_a_sample(pie_list[:,:,1],tau_max))))
for j in range(dj):
    README.write('alpha0%d\t: Posterior %8.3f \t Thinned Posterior %8.3f \n'% (j,autocorrelations[K+j],nf.autocorr_new(nf.thin_a_sample(alphas_list[:,:,0,j],tau_max))))
    README.write('alpha1%d\t: Posterior %8.3f \t Thinned Posterior %8.3f \n'% (j,autocorrelations[K+dj+j],nf.autocorr_new(nf.thin_a_sample(alphas_list[:,:,1,j],tau_max))))
for b in range(db):
    README.write('beta0%d\t: Posterior %8.3f \t Thinned Posterior %8.3f \n'% (b,autocorrelations[K+K*dj+b],nf.autocorr_new(nf.thin_a_sample(betas_list[:,:,0,b],tau_max))))
    README.write('beta1%d\t: Posterior %8.3f \t Thinned Posterior %8.3f \n'% (b,autocorrelations[K+K*dj+db+b],nf.autocorr_new(nf.thin_a_sample(betas_list[:,:,1,b],tau_max))))
README.close()
np.save(data_dir+'/thinned_Z_list.npy',nf.thin_a_sample(Z_list[:,:-1],tau_max))#this is for the uncorrected files where the last Z is a zero matrix
np.save(data_dir+'/thinned_pie_list.npy',nf.thin_a_sample(pie_list,tau_max))
np.save(data_dir+'/thinned_alphas_list.npy',nf.thin_a_sample(alphas_list,tau_max))
np.save(data_dir+'/thinned_betas_list.npy',nf.thin_a_sample(betas_list,tau_max))
| [
"matplotlib.pyplot.loglog",
"necessary_functions.thin_a_sample",
"numpy.sum",
"numpy.log",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.zeros",
"sklearn.preprocessing.OneHotEncoder",
"numpy.ones",
"scipy.stats.dirichlet.rvs",
"numpy.max",
"numpy.mean... | [((936, 980), 'numpy.loadtxt', 'np.loadtxt', (["(data_dir + '/processed_data.dat')"], {}), "(data_dir + '/processed_data.dat')\n", (946, 980), True, 'import numpy as np\n'), ((994, 1046), 'numpy.loadtxt', 'np.loadtxt', (["(data_dir + '/processed_data_smeared.dat')"], {}), "(data_dir + '/processed_data_smeared.dat')\n", (1004, 1046), True, 'import numpy as np\n'), ((1104, 1141), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""error"""'}), "(handle_unknown='error')\n", (1117, 1141), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1149, 1186), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""error"""'}), "(handle_unknown='error')\n", (1162, 1186), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1379, 1405), 'numpy.zeros', 'np.zeros', (['(2, Y1.shape[1])'], {}), '((2, Y1.shape[1]))\n', (1387, 1405), True, 'import numpy as np\n'), ((1416, 1442), 'numpy.zeros', 'np.zeros', (['(2, Y2.shape[1])'], {}), '((2, Y2.shape[1]))\n', (1424, 1442), True, 'import numpy as np\n'), ((1711, 1737), 'numpy.zeros', 'np.zeros', (['(2, Y1.shape[1])'], {}), '((2, Y1.shape[1]))\n', (1719, 1737), True, 'import numpy as np\n'), ((1748, 1774), 'numpy.zeros', 'np.zeros', (['(2, Y2.shape[1])'], {}), '((2, Y2.shape[1]))\n', (1756, 1774), True, 'import numpy as np\n'), ((2007, 2036), 'numpy.zeros', 'np.zeros', (['(nwalkers, T, N, K)'], {}), '((nwalkers, T, N, K))\n', (2015, 2036), True, 'import numpy as np\n'), ((2043, 2069), 'numpy.zeros', 'np.zeros', (['(nwalkers, T, K)'], {}), '((nwalkers, T, K))\n', (2051, 2069), True, 'import numpy as np\n'), ((2080, 2110), 'numpy.zeros', 'np.zeros', (['(nwalkers, T, K, dj)'], {}), '((nwalkers, T, K, dj))\n', (2088, 2110), True, 'import numpy as np\n'), ((2119, 2149), 'numpy.zeros', 'np.zeros', (['(nwalkers, T, K, db)'], {}), '((nwalkers, T, K, db))\n', (2127, 2149), True, 'import numpy as np\n'), ((2163, 2190), 'numpy.zeros', 'np.zeros', 
(['(nwalkers * T, K)'], {}), '((nwalkers * T, K))\n', (2171, 2190), True, 'import numpy as np\n'), ((2206, 2237), 'numpy.zeros', 'np.zeros', (['(nwalkers * T, K, dj)'], {}), '((nwalkers * T, K, dj))\n', (2214, 2237), True, 'import numpy as np\n'), ((2251, 2282), 'numpy.zeros', 'np.zeros', (['(nwalkers * T, K, db)'], {}), '((nwalkers * T, K, db))\n', (2259, 2282), True, 'import numpy as np\n'), ((2651, 2678), 'numpy.zeros', 'np.zeros', (['(nwalkers * T, K)'], {}), '((nwalkers * T, K))\n', (2659, 2678), True, 'import numpy as np\n'), ((2700, 2731), 'numpy.zeros', 'np.zeros', (['(nwalkers * T, K, dj)'], {}), '((nwalkers * T, K, dj))\n', (2708, 2731), True, 'import numpy as np\n'), ((2751, 2782), 'numpy.zeros', 'np.zeros', (['(nwalkers * T, K, db)'], {}), '((nwalkers * T, K, db))\n', (2759, 2782), True, 'import numpy as np\n'), ((3098, 3161), 'scipy.stats.dirichlet.rvs', 'dirichlet.rvs', ([], {'size': '(nwalkers * T)', 'alpha': '(Nprior * fake_alphas[0])'}), '(size=nwalkers * T, alpha=Nprior * fake_alphas[0])\n', (3111, 3161), False, 'from scipy.stats import dirichlet\n'), ((3182, 3245), 'scipy.stats.dirichlet.rvs', 'dirichlet.rvs', ([], {'size': '(nwalkers * T)', 'alpha': '(Nprior * fake_alphas[1])'}), '(size=nwalkers * T, alpha=Nprior * fake_alphas[1])\n', (3195, 3245), False, 'from scipy.stats import dirichlet\n'), ((3265, 3327), 'scipy.stats.dirichlet.rvs', 'dirichlet.rvs', ([], {'size': '(nwalkers * T)', 'alpha': '(Nprior * fake_betas[0])'}), '(size=nwalkers * T, alpha=Nprior * fake_betas[0])\n', (3278, 3327), False, 'from scipy.stats import dirichlet\n'), ((3347, 3409), 'scipy.stats.dirichlet.rvs', 'dirichlet.rvs', ([], {'size': '(nwalkers * T)', 'alpha': '(Nprior * fake_betas[0])'}), '(size=nwalkers * T, alpha=Nprior * fake_betas[0])\n', (3360, 3409), False, 'from scipy.stats import dirichlet\n'), ((3441, 3454), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (3449, 3454), True, 'import numpy as np\n'), ((5271, 5311), 'matplotlib.pyplot.loglog', 
'plt.loglog', (['Nbis', 'new', '"""o-"""'], {'label': '"""new"""'}), "(Nbis, new, 'o-', label='new')\n", (5281, 5311), True, 'import matplotlib.pyplot as plt\n'), ((5340, 5402), 'matplotlib.pyplot.plot', 'plt.plot', (['Nbis', '(Nbis / 1000.0)', '"""--k"""'], {'label': '"""$\\\\tau = N/1000$"""'}), "(Nbis, Nbis / 1000.0, '--k', label='$\\\\tau = N/1000$')\n", (5348, 5402), True, 'import matplotlib.pyplot as plt\n'), ((5403, 5417), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (5411, 5417), True, 'import matplotlib.pyplot as plt\n'), ((5418, 5454), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of samples, $N$"""'], {}), "('number of samples, $N$')\n", (5428, 5454), True, 'import matplotlib.pyplot as plt\n'), ((5455, 5501), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\tau$ estimates for $\\\\pi_{1}$"""'], {}), "('$\\\\tau$ estimates for $\\\\pi_{1}$')\n", (5465, 5501), True, 'import matplotlib.pyplot as plt\n'), ((5501, 5524), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (5511, 5524), True, 'import matplotlib.pyplot as plt\n'), ((5525, 5573), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(data_dir + '/pi1_tau_estimates.pdf')"], {}), "(data_dir + '/pi1_tau_estimates.pdf')\n", (5536, 5573), True, 'import matplotlib.pyplot as plt\n'), ((5572, 5620), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(data_dir + '/pi1_tau_estimates.png')"], {}), "(data_dir + '/pi1_tau_estimates.png')\n", (5583, 5620), True, 'import matplotlib.pyplot as plt\n'), ((1066, 1085), 'numpy.sum', 'np.sum', (['(labels == 1)'], {}), '(labels == 1)\n', (1072, 1085), True, 'import numpy as np\n'), ((1478, 1510), 'numpy.mean', 'np.mean', (['Y1[labels == k]'], {'axis': '(0)'}), '(Y1[labels == k], axis=0)\n', (1485, 1510), True, 'import numpy as np\n'), ((1524, 1556), 'numpy.mean', 'np.mean', (['Y2[labels == k]'], {'axis': '(0)'}), '(Y2[labels == k], axis=0)\n', (1531, 1556), True, 'import numpy as np\n'), ((1810, 1862), 
'numpy.mean', 'np.mean', (['Y1_smeared[data_smeared[:, 2] == k]'], {'axis': '(0)'}), '(Y1_smeared[data_smeared[:, 2] == k], axis=0)\n', (1817, 1862), True, 'import numpy as np\n'), ((1875, 1927), 'numpy.mean', 'np.mean', (['Y2_smeared[data_smeared[:, 2] == k]'], {'axis': '(0)'}), '(Y2_smeared[data_smeared[:, 2] == k], axis=0)\n', (1882, 1927), True, 'import numpy as np\n'), ((6877, 6918), 'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['Z_list[:, :-1]', 'tau_max'], {}), '(Z_list[:, :-1], tau_max)\n', (6893, 6918), True, 'import necessary_functions as nf\n'), ((7027, 7062), 'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['pie_list', 'tau_max'], {}), '(pie_list, tau_max)\n', (7043, 7062), True, 'import necessary_functions as nf\n'), ((7107, 7145), 'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['alphas_list', 'tau_max'], {}), '(alphas_list, tau_max)\n', (7123, 7145), True, 'import necessary_functions as nf\n'), ((7189, 7226), 'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['betas_list', 'tau_max'], {}), '(betas_list, tau_max)\n', (7205, 7226), True, 'import necessary_functions as nf\n'), ((3061, 3071), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (3068, 3071), True, 'import numpy as np\n'), ((5319, 5328), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5326, 5328), True, 'import matplotlib.pyplot as plt\n'), ((5676, 5700), 'numpy.max', 'np.max', (['autocorrelations'], {}), '(autocorrelations)\n', (5682, 5700), True, 'import numpy as np\n'), ((5066, 5077), 'numpy.log', 'np.log', (['(100)'], {}), '(100)\n', (5072, 5077), True, 'import numpy as np\n'), ((5079, 5099), 'numpy.log', 'np.log', (['(nwalkers * T)'], {}), '(nwalkers * T)\n', (5085, 5099), True, 'import numpy as np\n'), ((5906, 5950), 'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['pie_list[:, :, 0]', 'tau_max'], {}), '(pie_list[:, :, 0], tau_max)\n', (5922, 5950), True, 'import necessary_functions as nf\n'), ((6055, 6099), 
'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['pie_list[:, :, 1]', 'tau_max'], {}), '(pie_list[:, :, 1], tau_max)\n', (6071, 6099), True, 'import necessary_functions as nf\n'), ((6237, 6287), 'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['alphas_list[:, :, 0, j]', 'tau_max'], {}), '(alphas_list[:, :, 0, j], tau_max)\n', (6253, 6287), True, 'import necessary_functions as nf\n'), ((6407, 6457), 'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['alphas_list[:, :, 1, j]', 'tau_max'], {}), '(alphas_list[:, :, 1, j], tau_max)\n', (6423, 6457), True, 'import necessary_functions as nf\n'), ((6598, 6647), 'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['betas_list[:, :, 0, b]', 'tau_max'], {}), '(betas_list[:, :, 0, b], tau_max)\n', (6614, 6647), True, 'import necessary_functions as nf\n'), ((6771, 6820), 'necessary_functions.thin_a_sample', 'nf.thin_a_sample', (['betas_list[:, :, 1, b]', 'tau_max'], {}), '(betas_list[:, :, 1, b], tau_max)\n', (6787, 6820), True, 'import necessary_functions as nf\n')] |
#!/usr/bin/env python
# coding: utf8
import numpy as np
from forecaster.engine.engine import *
class MovingAverage(Engine):
    """Moving-average forecasting engine.

    Each validation step is forecast as the mean of the last ``window``
    periods, blending trailing training columns with earlier forecasts once
    the horizon extends past the training data.
    """

    def __init__(self, preprocessed_data, window):
        """
        :param preprocessed_data: data passed through to the base Engine
        :param window: number of trailing periods averaged per forecast
        """
        super().__init__(preprocessed_data)
        self.window = window

    def predict(self):
        """Append one forecast per validation column to ``self.predictions``.

        Bug fixes versus the original implementation:
        - the step ``i == self.window + 1`` matched none of the three
          conditions, so one forecast was silently skipped and the final
          array had the wrong length;
        - ``np.mean([self.predictions[:i]], axis=1)`` wrapped the slice in an
          extra list, producing a 2-D row inconsistent with the (n,)-shaped
          rows appended by the other branches.
        """
        for i in range(len(self.validation.columns)):
            if i == 0:
                # First step: plain average of the last `window` training columns.
                self.predictions.append(
                    np.mean(self.training[self.training.columns[-self.window:]].values, axis=1))
            elif i <= self.window:
                # Blend the remaining training columns with the forecasts so far.
                # NOTE(review): at i == window, columns[-window + i:] selects ALL
                # training columns rather than none — confirm this is intended.
                self.predictions.append(0.5 * (
                    np.mean(self.training[self.training.columns[-self.window + i:]].values, axis=1)
                    + np.mean(self.predictions[:i], axis=0)))
            else:
                # Beyond the window: average the forecasts made so far.
                self.predictions.append(np.mean(self.predictions[:i], axis=0))
        # One row per forecast step -> transpose to one row per series.
        self.predictions = np.transpose(np.array([row.tolist() for row in self.predictions]))
| [
"numpy.mean"
] | [((393, 468), 'numpy.mean', 'np.mean', (['self.training[self.training.columns[-self.window:]].values'], {'axis': '(1)'}), '(self.training[self.training.columns[-self.window:]].values, axis=1)\n', (400, 468), True, 'import numpy as np\n'), ((810, 849), 'numpy.mean', 'np.mean', (['[self.predictions[:i]]'], {'axis': '(1)'}), '([self.predictions[:i]], axis=1)\n', (817, 849), True, 'import numpy as np\n'), ((557, 636), 'numpy.mean', 'np.mean', (['self.training[self.training.columns[-self.window + i:]].values'], {'axis': '(1)'}), '(self.training[self.training.columns[-self.window + i:]].values, axis=1)\n', (564, 636), True, 'import numpy as np\n'), ((694, 731), 'numpy.mean', 'np.mean', (['self.predictions[:i]'], {'axis': '(0)'}), '(self.predictions[:i], axis=0)\n', (701, 731), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import random
def create_patches(image, patch_size, overlap_horizontal, overlap_vertical, n_patches=0, order='ranked'):
    """
    This function takes an image (whole spot of prostate cancer biopsy for example) and cuts it into a variable number
    of patches, which may or may not overlap
    :param image: a numpy array of an image [m x n x 3]
    :param patch_size: int - the size that the patches should have in the end
    :param overlap_horizontal: either a percentage (for example 0.3) or a pixel amount of how much overlap is desired
    :param overlap_vertical: either a percentage (for example 0.3) or a pixel amount of how much overlap is desired
    :param n_patches: 0 if all patches should be used, otherwise any integer
    (if more patches are cut than available, white patches are returned as well)
    :param order: 'original', 'shuffle', 'ranked', 'shuffle_ranked'
    :return: an array of patches [n_patches x patch_size x patch_size x 3],
    array of pixels in vertical direction where image patches start,
    array of pixels in horizontal direction where image patches start,
    array of per patch sums
    """
    img_shape = image.shape
    # tf.numpy_function hands string arguments over as bytes
    if isinstance(order, bytes):
        order = order.decode("utf-8")
    if n_patches == 0:
        assert img_shape[0] == img_shape[1] == 2048, \
            "right now, image size 2048x2048 is hard coded in tf_create_patches, " \
            "if different shape is used, please change source code to that shape or define n_patches in config"
    # pad image in case the image cannot be cut into patches of patchsize without having "leftovers"
    # (padding is split evenly between both sides; 255 = white background)
    pad0 = (patch_size - img_shape[0] % (patch_size - overlap_vertical)) % (patch_size - overlap_vertical)
    pad1 = (patch_size - img_shape[1] % (patch_size - overlap_horizontal)) % (patch_size - overlap_horizontal)
    image = np.pad(image, [[pad0 // 2, pad0 - pad0 // 2], [pad1 // 2, pad1 - pad1 // 2], [0, 0]], constant_values=255)
    if n_patches == 0:
        # NOTE(review): shape[0] (vertical) is paired with overlap_horizontal and
        # shape[1] with overlap_vertical here — the opposite pairing of the padding
        # above; harmless only when both overlaps are equal. Also note this runs
        # BEFORE ratio overlaps are converted to pixels below. TODO confirm.
        n_patches = int(np.floor((image.shape[0] - overlap_horizontal) / (overlap_horizontal - patch_size)) *
                        np.floor((image.shape[1] - overlap_vertical) / (overlap_vertical - patch_size)))
    # in case the overlap is given as a ratio, the pixel value is calculated through the patch size
    if overlap_horizontal < 1:
        overlap_horizontal *= patch_size
    if overlap_vertical < 1:
        overlap_vertical *= patch_size
    if overlap_horizontal == overlap_vertical == 0:
        # no overlap requested: the reshape-based implementation is faster
        return advanced_patching(image, patch_size, n_patches, order)
    # make sure the overlap is in whole pixels
    overlap_vertical = int(overlap_vertical)
    overlap_horizontal = int(overlap_horizontal)
    # get the dimension of the image
    # NOTE(review): only shape[0] is used for both axes, i.e. the (padded) image
    # is assumed square — TODO confirm behaviour for non-square inputs.
    image_size = image.shape[0]
    # top-left corner coordinate of every patch, along each axis
    first_indices_horizontal = np.arange(0, image_size - patch_size + 1, patch_size - overlap_horizontal)
    first_indices_vertical = np.arange(0, image_size - patch_size + 1, patch_size - overlap_vertical)
    number_resulting_patches = first_indices_horizontal.size * first_indices_vertical.size
    if number_resulting_patches < n_patches:
        # fewer patches available than requested: remember how many white
        # filler patches (pre-filled below) are needed
        extend_idx = n_patches - number_resulting_patches
        number_resulting_patches = n_patches
    else:
        extend_idx = False
    # pre-fill with white (255) so unused filler slots stay blank
    patches = np.ones((number_resulting_patches, patch_size, patch_size, 3)) * 255
    j = 0
    idx_v = []
    idx_h = []
    # cut the patches row by row, remembering each patch's top-left corner
    for idx_ver in first_indices_vertical:
        for idx_hor in first_indices_horizontal:
            patches[j, ...] = np.array(image[idx_ver:idx_ver + patch_size, idx_hor:idx_hor + patch_size, :])
            j += 1
            idx_v.append(idx_ver)
            idx_h.append(idx_hor)
    if extend_idx:
        # dummy coordinates (0) for the appended white filler patches
        idx_v = np.pad(idx_v, [0, extend_idx], constant_values=0)
        idx_h = np.pad(idx_h, [0, extend_idx], constant_values=0)
    idx_v = np.array(idx_v)
    idx_h = np.array(idx_h)
    if (order == 'shuffle_ranked') or (order == 'ranked') or (order == 'shuffle'):
        if (order == 'shuffle_ranked') or (order == 'ranked'):
            # rank patches according to highest color information
            # (lower pixel sum = darker = more tissue, so ascending argsort
            # keeps the most informative patches)
            idxs = np.argsort(patches.reshape(patches.shape[0], -1).sum(-1))[:n_patches]
        else: # order == 'shuffle'
            idxs = [i for i in range(number_resulting_patches)]
        if (order == 'shuffle') or (order == 'shuffle_ranked'):
            # in-place permutation, then keep the first n_patches of it
            random.shuffle(idxs)
            idxs = idxs[:n_patches]
        patches = patches[idxs]
        idx_v = idx_v[idxs]
        idx_h = idx_h[idxs]
    elif order == 'original':
        # keep row-major cutting order, just truncate
        if len(patches) > n_patches:
            patches = patches[:n_patches]
            idx_h = idx_h[:n_patches]
            idx_v = idx_v[:n_patches]
    else:
        raise Exception('order needs to be one of shuffle, ranked or original')
    patch_sum = np.array(patches.reshape(patches.shape[0], -1).sum(-1), 'float32')
    idx_v = np.array(idx_v, dtype='int64')
    idx_h = np.array(idx_h, dtype='int64')
    return np.array(patches, 'float32'), np.array(idx_v), np.array(idx_h), patch_sum
def advanced_patching(image, patch_size, n_patches, order):
    """
    This should be faster, because the image is just reshaped, then tiled and sorted by color value,
    without for loops
    inspired by https://www.kaggle.com/iafoss/panda-16x128x128-tiles
    :param image: a numpy array of an image [m x n x 3]
    :param patch_size: int
    :param n_patches: int
    :param order: shuffle or ranked or original or shuffle_ranked (if not all possible patches are cut,
    shuffle the patches with most image information)
    :return: patches [n_patches x patch_size x patch_size x 3], vertical start
    pixels, horizontal start pixels, per-patch pixel sums
    """
    img_shape = image.shape
    # pad with white so both dimensions are exact multiples of patch_size
    pad0 = (patch_size - img_shape[0] % (patch_size)) % (patch_size)
    pad1 = (patch_size - img_shape[1] % (patch_size)) % (patch_size)
    image = np.pad(image, [[pad0 // 2, pad0 - pad0 // 2], [pad1 // 2, pad1 - pad1 // 2], [0, 0]], constant_values=255)
    # split both axes into (n_tiles, patch_size) blocks via a plain reshape
    patches = image.reshape(image.shape[0] // patch_size, patch_size, image.shape[1] // patch_size, patch_size, 3)
    # top-left corner coordinates of every tile, in row-major tile order
    first_indices_horizontal = np.arange(0, image.shape[1], patch_size)
    first_indices_vertical = np.arange(0, image.shape[0], patch_size)
    idx_v = first_indices_vertical.repeat(first_indices_horizontal.shape[0])
    idx_h = np.tile(first_indices_horizontal, first_indices_vertical.shape[0])
    # reorder axes so each tile becomes one [patch_size x patch_size x 3] entry
    patches = patches.transpose(0, 2, 1, 3, 4).reshape(-1, patch_size, patch_size, 3)
    # in case more patches are needed than we actually have when cutting all, append white patches
    if len(patches) < n_patches:
        patches = np.pad(patches, [[0, n_patches - len(patches)], [0, 0], [0, 0], [0, 0]], constant_values=255)
        # NOTE(review): the filler coordinates are padded with 255 here, while
        # create_patches pads its filler coordinates with 0 — confirm which is
        # intended before relying on the returned index arrays.
        idx_h = np.pad(idx_h, [[0, n_patches-len(idx_h)]], constant_values=255)
        idx_v = np.pad(idx_v, [[0, n_patches-len(idx_v)]], constant_values=255)
    if (order == 'shuffle') or (order == 'ranked') or (order == 'shuffle_ranked'):
        patch_sum = patches.reshape(patches.shape[0], -1).sum(-1)
        if (order == 'ranked') or (order == 'shuffle_ranked'):
            # ascending pixel sum = darkest (most tissue) patches first
            idxs = np.argsort(patch_sum)[:n_patches]
            patch_sum = patch_sum[idxs]
            patches = patches[idxs]
            idx_h = idx_h[idxs]
            idx_v = idx_v[idxs]
        if (order == 'shuffle') or (order == 'shuffle_ranked'):
            # NOTE(review): for plain 'shuffle' this only permutes the FIRST
            # n_patches tiles in row-major order instead of sampling from all
            # tiles — TODO confirm this is intended.
            idxs = [i for i in range(n_patches)]
            random.shuffle(idxs)
            patches = patches[idxs]
            patch_sum = patch_sum[idxs]
            idx_h = idx_h[idxs]
            idx_v = idx_v[idxs]
    elif order == 'original':
        # keep row-major tile order, just truncate
        patches = patches[:n_patches]
        patch_sum = patches.reshape(patches.shape[0], -1).sum(-1)[:n_patches]
        idx_h = idx_h[:n_patches]
        idx_v = idx_v[:n_patches]
    else:
        raise Exception("only order shuffle, ranked, 'shuffle_ranked' or original are valid in patching")
    return patches, np.array(idx_v, dtype='int64'), np.array(idx_h, dtype='int64'), patch_sum #np.array(patches.reshape(patches.shape[0], -1).sum(-1), 'float32')
@tf.function
def tf_create_patches(image, patch_size, overlap_horizontal, overlap_vertical, n_patches, order, n_channels=3):
    """
    TensorFlow graph wrapper around create_patches.

    Runs the numpy patching code through tf.numpy_function and restores a
    static shape on the returned patch tensor, since numpy_function erases
    all shape information.
    :param image: image tensor [height x width x n_channels]
    :param patch_size: int, side length of the square patches
    :param overlap_horizontal: overlap in pixels (or a ratio < 1)
    :param overlap_vertical: overlap in pixels (or a ratio < 1)
    :param n_patches: number of patches; 0 means "cut as many as fit"
    :param order: 'original', 'shuffle', 'ranked' or 'shuffle_ranked'
    :param n_channels: number of image channels
    :return: (patches [n x patch_size x patch_size x n_channels], per-patch sums)
    """
    static_shape = image.shape
    image, _, _, patch_sums = tf.numpy_function(
        create_patches,
        (image, patch_size, overlap_horizontal, overlap_vertical, n_patches, order),
        (tf.float32, tf.int64, tf.int64, tf.float32))
    # Unknown (None) static dimensions fall back to the 2048 that
    # create_patches assumes when n_patches == 0.
    dim0 = 2048 if static_shape[0] is None else static_shape[0]
    dim1 = 2048 if static_shape[1] is None else static_shape[1]
    if n_patches == 0:
        # number of patches was not specified, so the maximum number of
        # patches is cut and needs to be recomputed here
        def _count_along(extent, overlap):
            # number of patch start positions along one axis (ceil division)
            step = patch_size - overlap
            if extent % step == 0:
                return int(extent / step)
            return int(np.ceil(extent / step))
        image_number = _count_along(dim0, overlap_vertical) * _count_along(dim1, overlap_horizontal)
    else:
        image_number = n_patches
    image = tf.reshape(image, [image_number, patch_size, patch_size, n_channels])
    return image, patch_sums
| [
"numpy.pad",
"numpy.ceil",
"random.shuffle",
"numpy.floor",
"tensorflow.reshape",
"numpy.ones",
"numpy.argsort",
"numpy.arange",
"numpy.tile",
"numpy.array",
"tensorflow.numpy_function"
] | [((1937, 2047), 'numpy.pad', 'np.pad', (['image', '[[pad0 // 2, pad0 - pad0 // 2], [pad1 // 2, pad1 - pad1 // 2], [0, 0]]'], {'constant_values': '(255)'}), '(image, [[pad0 // 2, pad0 - pad0 // 2], [pad1 // 2, pad1 - pad1 // 2],\n [0, 0]], constant_values=255)\n', (1943, 2047), True, 'import numpy as np\n'), ((2890, 2964), 'numpy.arange', 'np.arange', (['(0)', '(image_size - patch_size + 1)', '(patch_size - overlap_horizontal)'], {}), '(0, image_size - patch_size + 1, patch_size - overlap_horizontal)\n', (2899, 2964), True, 'import numpy as np\n'), ((2994, 3066), 'numpy.arange', 'np.arange', (['(0)', '(image_size - patch_size + 1)', '(patch_size - overlap_vertical)'], {}), '(0, image_size - patch_size + 1, patch_size - overlap_vertical)\n', (3003, 3066), True, 'import numpy as np\n'), ((3919, 3934), 'numpy.array', 'np.array', (['idx_v'], {}), '(idx_v)\n', (3927, 3934), True, 'import numpy as np\n'), ((3947, 3962), 'numpy.array', 'np.array', (['idx_h'], {}), '(idx_h)\n', (3955, 3962), True, 'import numpy as np\n'), ((4957, 4987), 'numpy.array', 'np.array', (['idx_v'], {'dtype': '"""int64"""'}), "(idx_v, dtype='int64')\n", (4965, 4987), True, 'import numpy as np\n'), ((5000, 5030), 'numpy.array', 'np.array', (['idx_h'], {'dtype': '"""int64"""'}), "(idx_h, dtype='int64')\n", (5008, 5030), True, 'import numpy as np\n'), ((5907, 6017), 'numpy.pad', 'np.pad', (['image', '[[pad0 // 2, pad0 - pad0 // 2], [pad1 // 2, pad1 - pad1 // 2], [0, 0]]'], {'constant_values': '(255)'}), '(image, [[pad0 // 2, pad0 - pad0 // 2], [pad1 // 2, pad1 - pad1 // 2],\n [0, 0]], constant_values=255)\n', (5913, 6017), True, 'import numpy as np\n'), ((6160, 6200), 'numpy.arange', 'np.arange', (['(0)', 'image.shape[1]', 'patch_size'], {}), '(0, image.shape[1], patch_size)\n', (6169, 6200), True, 'import numpy as np\n'), ((6230, 6270), 'numpy.arange', 'np.arange', (['(0)', 'image.shape[0]', 'patch_size'], {}), '(0, image.shape[0], patch_size)\n', (6239, 6270), True, 'import numpy as np\n'), 
((6360, 6426), 'numpy.tile', 'np.tile', (['first_indices_horizontal', 'first_indices_vertical.shape[0]'], {}), '(first_indices_horizontal, first_indices_vertical.shape[0])\n', (6367, 6426), True, 'import numpy as np\n'), ((8333, 8497), 'tensorflow.numpy_function', 'tf.numpy_function', (['create_patches', '(image, patch_size, overlap_horizontal, overlap_vertical, n_patches, order)', '(tf.float32, tf.int64, tf.int64, tf.float32)'], {}), '(create_patches, (image, patch_size, overlap_horizontal,\n overlap_vertical, n_patches, order), (tf.float32, tf.int64, tf.int64,\n tf.float32))\n', (8350, 8497), True, 'import tensorflow as tf\n'), ((9439, 9508), 'tensorflow.reshape', 'tf.reshape', (['image', '[image_number, patch_size, patch_size, n_channels]'], {}), '(image, [image_number, patch_size, patch_size, n_channels])\n', (9449, 9508), True, 'import tensorflow as tf\n'), ((3358, 3420), 'numpy.ones', 'np.ones', (['(number_resulting_patches, patch_size, patch_size, 3)'], {}), '((number_resulting_patches, patch_size, patch_size, 3))\n', (3365, 3420), True, 'import numpy as np\n'), ((3791, 3840), 'numpy.pad', 'np.pad', (['idx_v', '[0, extend_idx]'], {'constant_values': '(0)'}), '(idx_v, [0, extend_idx], constant_values=0)\n', (3797, 3840), True, 'import numpy as np\n'), ((3857, 3906), 'numpy.pad', 'np.pad', (['idx_h', '[0, extend_idx]'], {'constant_values': '(0)'}), '(idx_h, [0, extend_idx], constant_values=0)\n', (3863, 3906), True, 'import numpy as np\n'), ((5042, 5070), 'numpy.array', 'np.array', (['patches', '"""float32"""'], {}), "(patches, 'float32')\n", (5050, 5070), True, 'import numpy as np\n'), ((5072, 5087), 'numpy.array', 'np.array', (['idx_v'], {}), '(idx_v)\n', (5080, 5087), True, 'import numpy as np\n'), ((5089, 5104), 'numpy.array', 'np.array', (['idx_h'], {}), '(idx_h)\n', (5097, 5104), True, 'import numpy as np\n'), ((7958, 7988), 'numpy.array', 'np.array', (['idx_v'], {'dtype': '"""int64"""'}), "(idx_v, dtype='int64')\n", (7966, 7988), True, 'import numpy as 
np\n'), ((7990, 8020), 'numpy.array', 'np.array', (['idx_h'], {'dtype': '"""int64"""'}), "(idx_h, dtype='int64')\n", (7998, 8020), True, 'import numpy as np\n'), ((3590, 3668), 'numpy.array', 'np.array', (['image[idx_ver:idx_ver + patch_size, idx_hor:idx_hor + patch_size, :]'], {}), '(image[idx_ver:idx_ver + patch_size, idx_hor:idx_hor + patch_size, :])\n', (3598, 3668), True, 'import numpy as np\n'), ((4441, 4461), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (4455, 4461), False, 'import random\n'), ((7447, 7467), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (7461, 7467), False, 'import random\n'), ((2091, 2178), 'numpy.floor', 'np.floor', (['((image.shape[0] - overlap_horizontal) / (overlap_horizontal - patch_size))'], {}), '((image.shape[0] - overlap_horizontal) / (overlap_horizontal -\n patch_size))\n', (2099, 2178), True, 'import numpy as np\n'), ((2201, 2280), 'numpy.floor', 'np.floor', (['((image.shape[1] - overlap_vertical) / (overlap_vertical - patch_size))'], {}), '((image.shape[1] - overlap_vertical) / (overlap_vertical - patch_size))\n', (2209, 2280), True, 'import numpy as np\n'), ((7148, 7169), 'numpy.argsort', 'np.argsort', (['patch_sum'], {}), '(patch_sum)\n', (7158, 7169), True, 'import numpy as np\n'), ((9045, 9094), 'numpy.ceil', 'np.ceil', (['(shape0 / (patch_size - overlap_vertical))'], {}), '(shape0 / (patch_size - overlap_vertical))\n', (9052, 9094), True, 'import numpy as np\n'), ((9278, 9329), 'numpy.ceil', 'np.ceil', (['(shape1 / (patch_size - overlap_horizontal))'], {}), '(shape1 / (patch_size - overlap_horizontal))\n', (9285, 9329), True, 'import numpy as np\n')] |
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Grid(object):
"""
Class to manage the retiling of large-scale point-cloud data to a regular
grid. Tools allow to verify whether points belong to a given tile, and to
generate target points for feature extraction
"""
def __init__(self):
self.min_x = 0.
self.min_y = 0.
self.max_x = 0.
self.max_y = 0.
self.n_tiles_side = 1
self.is_set = False
def setup(self, min_x, min_y, max_x, max_y, n_tiles_side):
"""
Setup the grid.
:param min_x: Min x value of the tiling schema
:param min_y: Min y value of the tiling schema
:param max_x: Max x value of the tiling schema
:param max_y: Max y value of the tiling schema
:param n_tiles_side: Number of tiles along X and Y (tiling MUST be
square)
"""
self.n_tiles_side = n_tiles_side
self.min_x = min_x
self.min_y = min_y
self.max_x = max_x
self.max_y = max_y
self._check_finite_extent()
self._check_grid_is_square()
self.is_set = True
@property
def n_tiles_side(self):
""" Number of tiles along each direction. """
return self._n_tiles_side
@n_tiles_side.setter
def n_tiles_side(self, n_tiles_side):
if not isinstance(n_tiles_side, int) or n_tiles_side < 1:
raise ValueError('n_tiles_side must be int > 0! Got instead: '
'{}'.format(n_tiles_side))
self._n_tiles_side = n_tiles_side
@property
def grid_mins(self):
""" Lower grid boundaries. """
return np.array([self.min_x, self.min_y], dtype=np.float)
@property
def grid_maxs(self):
""" Upper grid boundaries. """
return np.array([self.max_x, self.max_y], dtype=np.float)
@property
def grid_width(self):
""" Width of the grid. """
return self.grid_maxs - self.grid_mins
@property
def tile_width(self):
""" Width of a tile. """
return self.grid_width / self.n_tiles_side
def get_tile_index(self, px, py):
"""
Determine the indices of the tile to which one of more points belong.
:param px: X coordinate(s) of the point(s)
:param py: Y coordinate(s) of the point(s)
"""
self._check_finite_extent()
point_cart = np.array([px, py], dtype=np.float).T
point_dir = (point_cart - self.grid_mins) / self.tile_width
indices = np.floor(point_dir).astype('int')
# If point falls outside the edge of the grid raise warning
mask_invalid_indices = np.logical_or(indices >= self.n_tiles_side,
indices < 0)
if mask_invalid_indices.any():
# axis = 1 if len(mask_invalid_indices.shape) > 1 else 0
# num_invalid_points = np.all(mask_invalid_indices, axis=axis).sum()
logger.warning("Points fall outside the bounds Min X={} Y={}, "
"Max X={} Y={}".format(*self.grid_mins,
*self.grid_maxs))
return indices
def get_tile_bounds(self, tile_index_x, tile_index_y):
"""
Determine the boundaries of a tile given its X and Y indices.
:param tile_index_x: Tile index along X
:param tile_index_y: Tile index along Y
"""
tile_index = np.array([tile_index_x, tile_index_y], dtype=np.int)
tile_mins = tile_index * self.tile_width + self.grid_mins
tile_maxs = tile_mins + self.tile_width
return tile_mins, tile_maxs
def is_point_in_tile(self, px, py, tile_index_x, tile_index_y,
precision=None):
"""
Determine whether one or more points belong to a tile (within an
optional precision threshold).
:param px: X coordinate(s) of the point(s)
:param py: Y coordinate(s) of the point(s)
:param tile_index_x: Tile index along X
:param tile_index_y: Tile index along Y
:param precision: Optional precision threshold to determine whether
the point(s) belong to the tile
"""
if precision is None:
indices = np.array([tile_index_x, tile_index_y], dtype=np.int).T
mask = indices == self.get_tile_index(px, py)
else:
point_cart = np.array([px, py], dtype=np.float).T
tile_mins, tile_maxs = self.get_tile_bounds(tile_index_x,
tile_index_y)
mask = np.logical_and(tile_mins - point_cart <= precision,
point_cart - tile_maxs <= precision)
return np.all(mask, axis=1)
def generate_tile_mesh(self, tile_index_x, tile_index_y, tile_mesh_size):
"""
Generate a mesh of points with a given spacing in a tile.
:param tile_index_x: Tile index along X
:param tile_index_y: Tile index along Y
:param tile_mesh_size: Spacing of the mesh (NOTE: each tile should
fit an integer number of this value)
:return:
"""
tile_mins, tile_maxs = self.get_tile_bounds(tile_index_x, tile_index_y)
n_points_per_dim = self.tile_width / tile_mesh_size
if not np.any(np.isclose(n_points_per_dim, np.rint(n_points_per_dim))):
raise ValueError('The tile width is not multiple'
'of the chosen mesh!')
offset = tile_mins + tile_mesh_size/2.
x = np.arange(0., self.tile_width[0], tile_mesh_size) + offset[0]
y = np.arange(0., self.tile_width[1], tile_mesh_size) + offset[1]
xv, yv = np.meshgrid(x, y)
return xv.flatten(), yv.flatten()
def _check_finite_extent(self):
for n_dim in range(1):
if np.isclose(self.grid_width[n_dim], 0.):
raise ValueError('Zero grid extend in {}!'.format('xy'[n_dim]))
def _check_grid_is_square(self):
if not np.isclose(self.tile_width[0], self.tile_width[1]):
raise ValueError('Grid is not square!')
| [
"numpy.meshgrid",
"numpy.logical_and",
"numpy.floor",
"logging.getLogger",
"numpy.isclose",
"numpy.rint",
"numpy.array",
"numpy.logical_or",
"numpy.arange",
"numpy.all"
] | [((45, 72), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (62, 72), False, 'import logging\n'), ((1700, 1750), 'numpy.array', 'np.array', (['[self.min_x, self.min_y]'], {'dtype': 'np.float'}), '([self.min_x, self.min_y], dtype=np.float)\n', (1708, 1750), True, 'import numpy as np\n'), ((1845, 1895), 'numpy.array', 'np.array', (['[self.max_x, self.max_y]'], {'dtype': 'np.float'}), '([self.max_x, self.max_y], dtype=np.float)\n', (1853, 1895), True, 'import numpy as np\n'), ((2701, 2757), 'numpy.logical_or', 'np.logical_or', (['(indices >= self.n_tiles_side)', '(indices < 0)'], {}), '(indices >= self.n_tiles_side, indices < 0)\n', (2714, 2757), True, 'import numpy as np\n'), ((3498, 3550), 'numpy.array', 'np.array', (['[tile_index_x, tile_index_y]'], {'dtype': 'np.int'}), '([tile_index_x, tile_index_y], dtype=np.int)\n', (3506, 3550), True, 'import numpy as np\n'), ((4800, 4820), 'numpy.all', 'np.all', (['mask'], {'axis': '(1)'}), '(mask, axis=1)\n', (4806, 4820), True, 'import numpy as np\n'), ((5773, 5790), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5784, 5790), True, 'import numpy as np\n'), ((2445, 2479), 'numpy.array', 'np.array', (['[px, py]'], {'dtype': 'np.float'}), '([px, py], dtype=np.float)\n', (2453, 2479), True, 'import numpy as np\n'), ((4662, 4754), 'numpy.logical_and', 'np.logical_and', (['(tile_mins - point_cart <= precision)', '(point_cart - tile_maxs <= precision)'], {}), '(tile_mins - point_cart <= precision, point_cart - tile_maxs <=\n precision)\n', (4676, 4754), True, 'import numpy as np\n'), ((5620, 5670), 'numpy.arange', 'np.arange', (['(0.0)', 'self.tile_width[0]', 'tile_mesh_size'], {}), '(0.0, self.tile_width[0], tile_mesh_size)\n', (5629, 5670), True, 'import numpy as np\n'), ((5694, 5744), 'numpy.arange', 'np.arange', (['(0.0)', 'self.tile_width[1]', 'tile_mesh_size'], {}), '(0.0, self.tile_width[1], tile_mesh_size)\n', (5703, 5744), True, 'import numpy as np\n'), ((5916, 5955), 
'numpy.isclose', 'np.isclose', (['self.grid_width[n_dim]', '(0.0)'], {}), '(self.grid_width[n_dim], 0.0)\n', (5926, 5955), True, 'import numpy as np\n'), ((6089, 6139), 'numpy.isclose', 'np.isclose', (['self.tile_width[0]', 'self.tile_width[1]'], {}), '(self.tile_width[0], self.tile_width[1])\n', (6099, 6139), True, 'import numpy as np\n'), ((2568, 2587), 'numpy.floor', 'np.floor', (['point_dir'], {}), '(point_dir)\n', (2576, 2587), True, 'import numpy as np\n'), ((4314, 4366), 'numpy.array', 'np.array', (['[tile_index_x, tile_index_y]'], {'dtype': 'np.int'}), '([tile_index_x, tile_index_y], dtype=np.int)\n', (4322, 4366), True, 'import numpy as np\n'), ((4466, 4500), 'numpy.array', 'np.array', (['[px, py]'], {'dtype': 'np.float'}), '([px, py], dtype=np.float)\n', (4474, 4500), True, 'import numpy as np\n'), ((5417, 5442), 'numpy.rint', 'np.rint', (['n_points_per_dim'], {}), '(n_points_per_dim)\n', (5424, 5442), True, 'import numpy as np\n')] |
################################################################################
# Copyright (C) 2013 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for bayespy.utils.linalg module.
"""
import numpy as np
from .. import misc
from .. import linalg
class TestDot(misc.TestCase):
    """Unit tests for linalg.dot (chained, broadcasting matrix product)."""
    def test_dot(self):
        """
        Test dot product multiple multi-dimensional arrays.
        """
        # If no arrays, return 0 (empty-product convention of linalg.dot)
        self.assertAllClose(linalg.dot(),
                            0)
        # If only one array, return itself
        self.assertAllClose(linalg.dot([[1,2,3],
                                        [4,5,6]]),
                            [[1,2,3],
                             [4,5,6]])
        # Basic test of two arrays: (2,3) * (3,2)
        self.assertAllClose(linalg.dot([[1,2,3],
                                        [4,5,6]],
                                       [[7,8],
                                        [9,1],
                                        [2,3]]),
                            [[31,19],
                             [85,55]])
        # Basic test of four arrays: (2,3) * (3,2) * (2,1) * (1,2)
        self.assertAllClose(linalg.dot([[1,2,3],
                                        [4,5,6]],
                                       [[7,8],
                                        [9,1],
                                        [2,3]],
                                       [[4],
                                        [5]],
                                       [[6,7]]),
                            [[1314,1533],
                             [3690,4305]])
        # Test broadcasting over the leading (batch) axes: (2,2,2) * (2,2,2,2)
        self.assertAllClose(linalg.dot([[[1,2],
                                         [3,4]],
                                        [[5,6],
                                         [7,8]]],
                                       [[[[1,2],
                                          [3,4]],
                                         [[5,6],
                                          [7,8]]],
                                        [[[9,1],
                                          [2,3]],
                                         [[4,5],
                                          [6,7]]]]),
                            [[[[ 7, 10],
                               [ 15, 22]],
                              [[ 67, 78],
                               [ 91, 106]]],
                             [[[ 13, 7],
                               [ 35, 15]],
                              [[ 56, 67],
                               [ 76, 91]]]])
        # Inconsistent shapes: (2,3) * (2,3)
        self.assertRaises(ValueError,
                          linalg.dot,
                          [[1,2,3],
                           [4,5,6]],
                          [[1,2,3],
                           [4,5,6]])
        # Other axes do not broadcast: (2,2,2) * (3,2,2)
        self.assertRaises(ValueError,
                          linalg.dot,
                          [[[1,2],
                            [3,4]],
                           [[5,6],
                            [7,8]]],
                          [[[1,2],
                            [3,4]],
                           [[5,6],
                            [7,8]],
                           [[9,1],
                            [2,3]]])
        # Do not broadcast matrix axes: (2,1) * (3,2)
        self.assertRaises(ValueError,
                          linalg.dot,
                          [[1],
                           [2]],
                          [[1,2,3],
                           [4,5,6]])
        # Do not accept less than 2-D arrays: (2) * (2,2)
        self.assertRaises(ValueError,
                          linalg.dot,
                          [1,2],
                          [[1,2,3],
                           [4,5,6]])
class TestBandedSolve(misc.TestCase):
    """Unit tests for linalg.block_banded_solve."""

    def test_block_banded_solve(self):
        """
        Test the Gaussian elimination algorithm for block-banded matrices.
        """
        #
        # Create a block-banded matrix
        #
        # Number of blocks
        N = 40
        # Random sizes of the blocks
        #D = np.random.randint(5, 10, size=N)
        # Fixed sizes of the blocks. NOTE: np.int was removed in
        # NumPy >= 1.24; use the builtin int dtype instead.
        D = 5*np.ones(N, dtype=int)
        # Some helpful variables to create the covariances
        W = [np.random.randn(D[i], 2*D[i])
             for i in range(N)]
        # The diagonal blocks (covariances)
        A = [np.dot(W[i], W[i].T) for i in range(N)]
        # The superdiagonal blocks (cross-covariances)
        B = [np.dot(W[i][:,-1:], W[i+1][:,:1].T) for i in range(N-1)]
        C = misc.block_banded(A, B)
        # Create the system to be solved: y=C*x
        x_true = np.random.randn(np.sum(D))
        y = np.dot(C, x_true)
        x_true = np.reshape(x_true, (N, -1))
        y = np.reshape(y, (N, -1))
        #
        # Run tests
        #
        # The correct inverse
        invC = np.linalg.inv(C)
        # Inverse from the function that is tested
        (invA, invB, x, ldet) = linalg.block_banded_solve(np.asarray(A),
                                                          np.asarray(B),
                                                          np.asarray(y))
        # Check that you get the correct number of blocks
        self.assertEqual(len(invA), N)
        self.assertEqual(len(invB), N-1)
        # Check each block
        i0 = 0
        for i in range(N-1):
            i1 = i0 + D[i]
            i2 = i1 + D[i+1]
            # Check diagonal block
            self.assertTrue(np.allclose(invA[i], invC[i0:i1, i0:i1]))
            # Check super-diagonal block
            self.assertTrue(np.allclose(invB[i], invC[i0:i1, i1:i2]))
            i0 = i1
        # Check last block
        self.assertTrue(np.allclose(invA[-1], invC[i0:, i0:]))
        # Check the solution of the system
        self.assertTrue(np.allclose(x_true, x))
        # Check the log determinant (ratio to numpy's slogdet should be 1)
        self.assertAlmostEqual(ldet/np.linalg.slogdet(C)[1], 1)
| [
"numpy.sum",
"numpy.random.randn",
"numpy.asarray",
"numpy.allclose",
"numpy.ones",
"numpy.linalg.inv",
"numpy.reshape",
"numpy.linalg.slogdet",
"numpy.dot"
] | [((4931, 4948), 'numpy.dot', 'np.dot', (['C', 'x_true'], {}), '(C, x_true)\n', (4937, 4948), True, 'import numpy as np\n'), ((4966, 4993), 'numpy.reshape', 'np.reshape', (['x_true', '(N, -1)'], {}), '(x_true, (N, -1))\n', (4976, 4993), True, 'import numpy as np\n'), ((5006, 5028), 'numpy.reshape', 'np.reshape', (['y', '(N, -1)'], {}), '(y, (N, -1))\n', (5016, 5028), True, 'import numpy as np\n'), ((5116, 5132), 'numpy.linalg.inv', 'np.linalg.inv', (['C'], {}), '(C)\n', (5129, 5132), True, 'import numpy as np\n'), ((4406, 4430), 'numpy.ones', 'np.ones', (['N'], {'dtype': 'np.int'}), '(N, dtype=np.int)\n', (4413, 4430), True, 'import numpy as np\n'), ((4504, 4535), 'numpy.random.randn', 'np.random.randn', (['D[i]', '(2 * D[i])'], {}), '(D[i], 2 * D[i])\n', (4519, 4535), True, 'import numpy as np\n'), ((4624, 4644), 'numpy.dot', 'np.dot', (['W[i]', 'W[i].T'], {}), '(W[i], W[i].T)\n', (4630, 4644), True, 'import numpy as np\n'), ((4732, 4771), 'numpy.dot', 'np.dot', (['W[i][:, -1:]', 'W[i + 1][:, :1].T'], {}), '(W[i][:, -1:], W[i + 1][:, :1].T)\n', (4738, 4771), True, 'import numpy as np\n'), ((4908, 4917), 'numpy.sum', 'np.sum', (['D'], {}), '(D)\n', (4914, 4917), True, 'import numpy as np\n'), ((5243, 5256), 'numpy.asarray', 'np.asarray', (['A'], {}), '(A)\n', (5253, 5256), True, 'import numpy as np\n'), ((5316, 5329), 'numpy.asarray', 'np.asarray', (['B'], {}), '(B)\n', (5326, 5329), True, 'import numpy as np\n'), ((5389, 5402), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (5399, 5402), True, 'import numpy as np\n'), ((5958, 5995), 'numpy.allclose', 'np.allclose', (['invA[-1]', 'invC[i0:, i0:]'], {}), '(invA[-1], invC[i0:, i0:])\n', (5969, 5995), True, 'import numpy as np\n'), ((6065, 6087), 'numpy.allclose', 'np.allclose', (['x_true', 'x'], {}), '(x_true, x)\n', (6076, 6087), True, 'import numpy as np\n'), ((5734, 5774), 'numpy.allclose', 'np.allclose', (['invA[i]', 'invC[i0:i1, i0:i1]'], {}), '(invA[i], invC[i0:i1, i0:i1])\n', (5745, 5774), True, 
'import numpy as np\n'), ((5845, 5885), 'numpy.allclose', 'np.allclose', (['invB[i]', 'invC[i0:i1, i1:i2]'], {}), '(invB[i], invC[i0:i1, i1:i2])\n', (5856, 5885), True, 'import numpy as np\n'), ((6162, 6182), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['C'], {}), '(C)\n', (6179, 6182), True, 'import numpy as np\n')] |
"""Deep Dreaming using Caffe and Google's Inception convolutional neural network."""
# pylint: disable=invalid-name, wrong-import-position
from collections import namedtuple, OrderedDict
import logging
import multiprocessing as mp
import os
from pathlib import Path
import queue
import re
import sys
os.environ['GLOG_minloglevel'] = '1'
import caffe
import numpy as np
from PIL import Image
from scipy import ndimage
try:
from tqdm import tqdm
except ImportError:
pass
from .tile_worker import TileRequest, TileWorker
class TQDMStream:
    """File-like wrapper around a stream that routes writes through
    tqdm.write while ``redirected`` is set, so log output does not
    mangle an active progress bar."""

    def __init__(self, stream):
        self.stream = stream
        self.redirected = False

    def write(self, s):
        use_tqdm = self.redirected and 'tqdm' in globals()
        if not use_tqdm:
            self.stream.write(s)
        elif s.rstrip():
            # Skip writes that are pure whitespace; tqdm.write adds its
            # own trailing newline.
            tqdm.write(s, file=self.stream)

    def flush(self):
        self.stream.flush()
# Module-level logger plus a stderr wrapper that cooperates with tqdm bars.
logger = logging.getLogger(__name__)
stream = TQDMStream(sys.stderr)
# 'spawn' start method: each tile worker begins with a fresh interpreter
# (and thus a fresh Caffe state) instead of inheriting via fork.
CTX = mp.get_context('spawn')
# float32 machine epsilon; used elsewhere as a small additive constant to
# avoid division by zero.
EPS = np.finfo(np.float32).eps
# Bundled model descriptors: deploy prototxt, weights file, per-channel
# mean, and an optional categories file.
# Note that the per-channel mean values are in BGR order.
CNNData = namedtuple('CNNData', 'deploy model mean categories')
CNNData.__new__.__defaults__ = (None,) # Make categories optional.
_BASE_DIR = Path(__file__).parent.parent
GOOGLENET_BVLC = CNNData(
    _BASE_DIR/'bvlc_googlenet/deploy.prototxt',
    _BASE_DIR/'bvlc_googlenet/bvlc_googlenet.caffemodel',
    (104, 117, 123),
    categories=_BASE_DIR/'bvlc_googlenet/categories.txt')
GOOGLENET_PLACES205 = CNNData(
    _BASE_DIR/'googlenet_places205/deploy_places205.prototxt',
    _BASE_DIR/'googlenet_places205/googlelet_places205_train_iter_2400000.caffemodel',
    (105.417, 113.753, 116.047),
    categories=_BASE_DIR/'googlenet_places205/categories.txt')
GOOGLENET_PLACES365 = CNNData(
    _BASE_DIR/'googlenet_places365/deploy_googlenet_places365.prototxt',
    _BASE_DIR/'googlenet_places365/googlenet_places365.caffemodel',
    (104.051, 112.514, 116.676),
    categories=_BASE_DIR/'googlenet_places365/categories_places365.txt')
# NOTE(review): RESNET_50 points at the bvlc_googlenet categories file —
# confirm this reuse is intentional rather than a copy-paste leftover.
RESNET_50 = CNNData(
    _BASE_DIR/'resnet/ResNet-50-deploy.prototxt',
    _BASE_DIR/'resnet/ResNet-50-model.caffemodel',
    (104, 117, 123),
    categories=_BASE_DIR/'bvlc_googlenet/categories.txt')
def normf(arr, *args, **kwargs):
    """Return the norm of *arr* viewed as a flat vector.

    Extra positional/keyword arguments are forwarded to np.linalg.norm
    (e.g. the ``ord`` parameter)."""
    flat = arr.flatten()
    return np.linalg.norm(flat, *args, **kwargs)
def call_normalized(fn, arr, *args, **kwargs):
    """Call *fn* on a copy of *arr* rescaled into [0, 1].

    The copy is shifted by its minimum and divided by the resulting
    maximum before the call. When *fn* returns an ndarray, the inverse
    affine map is applied so the result is back on the original scale;
    any other return value is passed through unchanged."""
    lo = arr.copy()
    offset = lo.min()
    lo -= offset
    span = lo.max()
    lo /= span
    result = fn(lo, *args, **kwargs)
    if not isinstance(result, np.ndarray):
        return result
    return result * span + offset
def save_as_hdr(arr, filename, gamma=2.2, allow_negative=True):
    """Saves a float32 ndarray to a high dynamic range (OpenEXR or float32 TIFF) file.

    The output format is chosen from the filename extension: '.exr' writes
    an OpenEXR file, '.tif'/'.tiff' a float32 TIFF.

    Args:
        arr (ndarray): The input array (HxWx3, channels written as R, G, B).
        filename (str | Path): The output filename.
        gamma (Optional[float]): The encoding gamma of arr.
        allow_negative (Optional[bool]): Clip negative values to zero if false.

    Raises:
        ValueError: If the filename extension is not a known HDR format.
    """
    arr = arr.astype(np.float32)/255
    if not allow_negative:
        arr[arr < 0] = 0
    if gamma != 1:
        # Sign-preserving power so negative components stay meaningful.
        arr = np.sign(arr)*np.abs(arr)**gamma
    filename = str(filename)
    extension = filename.rpartition('.')[2].lower()
    if extension == 'exr':
        import OpenEXR
        exr = OpenEXR.OutputFile(filename, OpenEXR.Header(arr.shape[1], arr.shape[0]))
        exr.writePixels({'R': arr[..., 0].tobytes(),
                         'G': arr[..., 1].tobytes(),
                         'B': arr[..., 2].tobytes()})
        exr.close()
    elif extension in ('tif', 'tiff'):
        import tifffile
        tiff = tifffile.TiffWriter(filename)
        tiff.save(arr, photometric='rgb')
        tiff.close()
    else:
        # ValueError is a subclass of Exception, so callers that caught the
        # previous generic Exception still work.
        raise ValueError('Unknown HDR file format.')
def to_image(arr):
    """Clips the values in a float32 ndarray to 0-255 and converts it to a PIL image.

    Args:
        arr (ndarray): The input array."""
    rounded = np.round(arr)
    clipped = np.clip(rounded, 0, 255)
    return Image.fromarray(np.uint8(clipped))
def _resize(arr, size, method=Image.BICUBIC):
    """Resample a CxHxW float array to *size* (h, w), one channel at a time.

    Each channel plane is converted to a PIL image, resized with the given
    resampling *method*, and the planes are restacked along the first axis.

    Raises:
        TypeError: If *arr* is not a 3D CxHxW array.
    """
    h, w = size
    arr = np.float32(arr)
    if arr.ndim != 3:
        raise TypeError('Only 3D CxHxW arrays are supported')
    resized_planes = []
    for plane in arr:
        img = Image.fromarray(plane)
        resized_planes.append(np.array(img.resize((w, h), method)))
    return np.stack(resized_planes)
def roll2(arr, xy):
    """Circularly shift *arr* by xy=(x, y): x along axis 2, y along axis 1."""
    shift_x, shift_y = xy
    rolled = np.roll(arr, shift_x, axis=2)
    return np.roll(rolled, shift_y, axis=1)
def tv_norm(x, beta=2):
    """Computes the total variation norm and its gradient. From jcjohnson/cnn-vis."""
    # Forward differences along width (axis 2) and height (axis 1),
    # wrapping around at the borders.
    dx = ndimage.convolve1d(x, [-1, 1], axis=2, mode='wrap')
    dy = ndimage.convolve1d(x, [-1, 1], axis=1, mode='wrap')
    sq_norm = dx**2 + dy**2 + EPS
    loss = np.sum(sq_norm**(beta/2))
    # Chain rule: derivative of sq_norm**(beta/2) w.r.t. sq_norm, then
    # w.r.t. each difference image.
    d_sq = (beta/2) * sq_norm**(beta/2 - 1)
    gx = 2 * dx * d_sq
    gy = 2 * dy * d_sq
    grad = (gx + gy) - roll2(gx, (1, 0)) - roll2(gy, (0, 1))
    return loss, grad
class ShapeError(Exception):
    """Raised by CNN when an invalid layer shape is requested which would otherwise crash Caffe."""
    def __str__(self):
        # self.args is the constructor-argument tuple; the %-formatting
        # expects exactly one argument (the offending shape tuple).
        return 'bad shape %s' % self.args
class CaffeStateError(Exception):
    """Raised by CNN when the worker processes have died or malfunctioned, or Caffe is otherwise
    in a bad state. This error is only dealable with by creating a new CNN instance."""
    def __str__(self):
        # %-formatting against self.args expects exactly one constructor
        # argument (the description string).
        return 'Bad Caffe state: %s' % self.args
class _LayerIndexer:
def __init__(self, net, attr):
self.net, self.attr = net, attr
def __getitem__(self, key):
return getattr(self.net.blobs[key], self.attr)[0]
def __setitem__(self, key, value):
getattr(self.net.blobs[key], self.attr)[0] = value
class CNN:
    """Represents an instance of a Caffe convolutional neural network.

    Gradient work is farmed out to TileWorker subprocesses (CPU and/or
    GPU); this object owns the request/response queues and the workers'
    lifecycle."""
    def __init__(self, cnndata, cpu_workers=0, gpus=[]):
        """Initializes a CNN.

        Example:
            CNN(GOOGLENET_PLACES365, cpu_workers=0, gpus=[0])

        Args:
            cnndata (CNNData): Deploy prototxt, weights, per-channel mean,
                and optional categories file for the model to load.
            cpu_workers (Optional[int]): The number of CPU workers to start. The default is 1 if
                no other compute devices are specified.
            gpus (Optional[list[int]]): The GPU device numbers to start GPU workers on.
        """
        # NOTE(review): mutable default [] is shared across calls; harmless
        # here since gpus is only iterated, never mutated.
        caffe.set_mode_cpu()
        self.net = caffe.Net(str(cnndata.deploy), 1, weights=str(cnndata.model))
        self.mean = np.float32(cnndata.mean)
        # Indexers into blob activations ('data') and gradients ('diff').
        self.data = _LayerIndexer(self.net, 'data')
        self.diff = _LayerIndexer(self.net, 'diff')
        # Fall back to numeric category names when the net has no 'prob' blob.
        try:
            self.categories = [str(i) for i in range(self.data['prob'].size)]
        except KeyError:
            self.categories = []
        if cnndata.categories is not None:
            self.categories = open(str(cnndata.categories)).read().splitlines()
        self.img = np.zeros_like(self.data['data'])  # current working image (CxHxW, BGR, mean-subtracted)
        self.step = 0       # number of gradient ascent steps taken so far
        self.total_px = 0   # total pixels to process (drives the progress bar)
        self.progress_bar = None
        # Tile requests go out on req_q; (position, gradient) pairs come back
        # on resp_q.
        self.req_q = CTX.JoinableQueue()
        self.resp_q = CTX.Queue()
        self.workers = []
        self.is_healthy = True
        if not cpu_workers and not gpus:
            cpu_workers = 1
        for _ in range(cpu_workers):
            self.workers.append(TileWorker(self.req_q, self.resp_q, cnndata, None))
        for gpu in gpus:
            self.workers.append(TileWorker(self.req_q, self.resp_q, cnndata, gpu))
    def __del__(self):
        """Marks this CNN unhealthy and shuts down all worker processes."""
        self.is_healthy = False
        for worker in self.workers:
            worker.__del__()
    def ensure_healthy(self):
        """Ensures that the worker subprocesses are healthy. If one has terminated, it will
        terminate the others, set self.is_healthy to False, and raise a CaffeStateError."""
        if not self.is_healthy:
            raise CaffeStateError(
                'The worker processes were terminated. Please make a new CNN instance.')
        for worker in self.workers:
            if worker.proc.exitcode:
                logger.error('Tile worker %s (pid %d) crashed.', worker.proc.name, worker.proc.pid)
                self.__del__()
                raise CaffeStateError('Worker process malfunction detected; terminating others.')
        return True
    def _preprocess(self, img):
        """Converts from HxWx3 RGB to 3xHxW BGR and subtracts the network per-channel mean."""
        return np.rollaxis(np.float32(img), 2)[::-1] - self.mean[:, None, None]
    def _deprocess(self, img):
        """Converts from 3xHxW BGR to HxWx3 RGB and adds the network per-channel mean."""
        return np.dstack((img + self.mean[:, None, None])[::-1])
    def get_features(self, input_img, layers=None, max_tile_size=512):
        """Retrieve feature maps from the classification (forward) phase of operation.

        Example:
            cnn.get_features(img, ['prob'])['prob'] classifies 'img' and returns the predicted
            probability distribution over the network's categories.

        Returns:
            A dict which maps each layer in layers to a retrieved feature map.
        """
        input_arr = self._preprocess(np.float32(input_img))
        h = min(max_tile_size, input_arr.shape[1])
        w = min(max_tile_size, input_arr.shape[2])
        # Downscale rather than tile: a single forward pass is performed.
        if max(*input_arr.shape[1:]) > max_tile_size:
            input_arr = _resize(input_arr, (h, w))
        self.net.blobs['data'].reshape(1, 3, h, w)
        self.data['data'] = input_arr
        end = self.layers()[-1]
        self.net.forward(end=end)
        if not layers:
            layers = self.layers()
        features = OrderedDict()
        for layer in layers:
            features[layer] = self.data[layer].copy()
        return features
    def _grad_tiled(self, layers, progress=True, max_tile_size=512, **kwargs):
        """Computes the objective gradient w.r.t. self.img by splitting it
        into tiles of at most max_tile_size per side, dispatching them to
        the worker processes, and reassembling the per-tile gradients."""
        # pylint: disable=too-many-locals
        if 'tqdm' in globals() and progress:
            if not self.progress_bar:
                stream.redirected = True
                self.progress_bar = tqdm(
                    total=self.total_px, unit='pix', unit_scale=True, ncols=80, dynamic_ncols=True,
                    smoothing=0.1)
        h, w = self.img.shape[1:]  # Height and width of input image
        ny, nx = (h-1)//max_tile_size+1, (w-1)//max_tile_size+1  # Number of tiles per dimension
        g = np.zeros_like(self.img)
        for y in range(ny):
            th = h//ny
            # The last tile in each dimension absorbs the remainder.
            if y == ny-1:
                th += h - th*ny
            for x in range(nx):
                tw = w//nx
                if x == nx-1:
                    tw += w - tw*nx
                sy, sx = h//ny*y, w//nx*x
                data = self.img[:, sy:sy+th, sx:sx+tw]
                self.ensure_healthy()
                self.req_q.put(TileRequest((sy, sx), data, layers, self.step == 0))
        for _ in range(ny*nx):
            # Poll with a 1 s timeout so worker crashes are noticed quickly.
            while True:
                try:
                    self.ensure_healthy()
                    resp, grad = self.resp_q.get(True, 1)
                    break
                except queue.Empty:
                    continue
            sy, sx = resp
            g[:, sy:sy+grad.shape[1], sx:sx+grad.shape[2]] = grad
            if 'tqdm' in globals() and progress:
                self.progress_bar.update(np.prod(grad.shape[-2:]))
        return g
    def _step(self, n=1, step_size=1, g_weight=1, l2_reg=0, tv_reg=0, p=2, beta=2,
              jitter=32, seed=0, save_intermediates=False, **kwargs):
        """Performs n jittered gradient ascent steps on self.img, with
        optional L2 (strength l2_reg, exponent p) and total variation
        (strength tv_reg, exponent beta) regularization."""
        np.random.seed(self.img.size + seed)
        for t in range(1, n+1):
            # Random roll before each step reduces tile-boundary artifacts.
            xy = np.random.randint(-jitter, jitter+1, 2)
            self.img = roll2(self.img, xy)
            # Compute normalized gradients and update image
            g = self._grad_tiled(**kwargs)
            g /= np.mean(np.abs(g)) + EPS
            _, tv_g = tv_norm(self.img, beta)
            tv_g /= 255**(beta-1)
            l2_g = p * np.sign(self.img) * np.abs(self.img / 127.5)**(p-1)
            grad = g_weight*g - l2_reg*l2_g - tv_reg*tv_g
            self.img += step_size * grad
            self.img = roll2(self.img, -xy)
            if save_intermediates:
                to_image(self._deprocess(self.img)).save('out%04d.bmp' % self.step)
            self.step += 1
    def _octave_detail(self, base, min_size=128, per_octave=2, fn=None, **kwargs):
        """Recursively computes the dream 'detail' for base, coarsest scale
        first: the detail from the next-smaller octave is upscaled and used
        as the starting point for gradient ascent at this scale.

        fn, if given, maps the current (h, w) to extra per-scale kwargs."""
        if 'n' not in kwargs:
            kwargs['n'] = 10
        n = kwargs['n']
        fnargs = {}
        if fn:
            fnargs.update(fn(base.shape[-2:]))
            if 'n' in fnargs:
                n = fnargs['n']
        if min(base.shape[1:]) < 32:
            raise ShapeError(base.shape)
        factor = 2**(1/per_octave)
        detail = np.zeros_like(base, dtype=np.float32)
        self.total_px += base.shape[1] * base.shape[2] * n
        hf, wf = np.int32(np.round(np.array(base.shape)[-2:]/factor))
        if min(hf, wf) >= min_size:
            smaller_base = _resize(base, (hf, wf))
            smaller_detail = self._octave_detail(smaller_base, min_size, per_octave, fn, **kwargs)
            detail = _resize(smaller_detail, base.shape[-2:])
        self.img = base + detail
        kwargs.update(fnargs)
        self._step(**kwargs)
        return self.img - base
    def layers(self, pattern='.*'):
        """Returns a list of layer names matching a regular expression."""
        layers = []
        for i, layer in enumerate(self.net.blobs.keys()):
            # Skip the input blob and Caffe's auto-generated split layers.
            if i == 0 or layer.partition('_split_')[1]:
                continue
            if re.fullmatch(pattern, layer):
                layers.append(layer)
        if not layers:
            raise KeyError('no layers found')
        return layers
    def classify(self, input_img, n=1, **kwargs):
        """Classifies the input image and returns the n most probable categories.

        Args:
            input_img: The image to process (PIL images or Numpy arrays are accepted).
            n: The n most probable categories to return.
            max_tile_size: Does not allow the image dimension to exceed this.

        Returns:
            A list containing the n most probable categories."""
        prob = self.get_features(input_img, ['prob'], **kwargs)['prob']
        indices = prob.argsort()[::-1][:n]
        return [(prob[i], self.categories[i]) for i in indices]
    def prepare_layer_list(self, layers):
        """Normalizes a layer spec (name, list of names, or name->weight
        mapping) into an OrderedDict ordered from the deepest layer up."""
        if isinstance(layers, str):
            layers = [layers]
        if isinstance(layers, list):
            layers = {layer: 1 for layer in layers}
        _layers = OrderedDict()
        for layer in reversed(self.net.blobs.keys()):
            if layer in layers:
                _layers[layer] = layers[layer]
        return _layers
    def prepare_guide_weights(self, guide_img, layers=None, max_guide_size=512):
        """Builds per-layer weight maps from a guide image: each 3D feature
        map is summed over its spatial axes and L1-normalized."""
        if not layers:
            layers = self.layers()
        if isinstance(layers, str):
            layers = [layers]
        guide_features = self.get_features(guide_img, layers, max_tile_size=max_guide_size)
        weights = {}
        for layer in layers:
            # Only channelwise (3D) feature maps can be turned into weights.
            if guide_features[layer].ndim != 3:
                continue
            v = np.sum(guide_features[layer], axis=(1, 2), keepdims=True)
            weights[layer] = v/normf(v, 1)
        return self.prepare_layer_list(weights)
    def subset_layers(self, layers, new_layers):
        """Returns an OrderedDict restricted to new_layers, in that order."""
        _layers = OrderedDict()
        for layer in new_layers:
            _layers[layer] = layers[layer]
        return _layers
    def dream(self, input_img, layers, progress=True, save_intermediates=False, **kwargs):
        """Runs the Deep Dream multiscale gradient ascent algorithm on the input image.

        Args:
            input_img: The image to process (PIL images or Numpy arrays are accepted)
            layers (dict): The layer/feature weights to use in the objective function for gradient
                ascent.
            progress (Optional[bool]): Display a progress bar while computing.
            min_size (Optional[int]): Don't permit the small edge of the image to go below this.
            per_octave (Optional[int]): Determines the difference between each scale; for instance,
                the default of 2 means that a 1000x1000 input image will get processed as 707x707
                and 500x500.
            n (Optional[int]): The number of gradient ascent steps per scale. Defaults to 10.
            step_size (Optional[float]): The strength of each individual gradient ascent step.
            max_tile_size (Optional[int]): Defaults to 512, suitable for a GPU with 2 GB RAM.
                Higher values perform better; if Caffe runs out of GPU memory and crashes then it
                should be lowered.

        Returns:
            The unclipped processed image as a float32 ndarray which has a valid range of 0-255 but
            which may contain components that are less than 0 or greater than 255.
            deep_dream.to_image() can be used to convert the ndarray to a PIL image.
        """
        self.ensure_healthy()
        _layers = self.prepare_layer_list(layers)
        input_arr = self._preprocess(np.float32(input_img))
        self.total_px = 0
        self.progress_bar = None
        self.step = 0
        try:
            detail = self._octave_detail(input_arr, layers=_layers, progress=progress,
                                         save_intermediates=save_intermediates, **kwargs)
        except KeyboardInterrupt:
            # Workers may be mid-request; the queues cannot be trusted anymore.
            self.__del__()
            raise CaffeStateError('Worker processes left in inconsistent states. Terminating them.')
        finally:
            if self.progress_bar:
                self.progress_bar.close()
                stream.redirected = False
        output = self._deprocess(detail + input_arr)
        if save_intermediates:
            to_image(output).save('out%04d.bmp' % self.step)
        return output
    def dream_guided(self, input_img, guide_img, layers, max_guide_size=512, **kwargs):
        """Performs guided gradient ascent on input_img, weighted by the feature map channel sums
        of guide_img. This algorithm works best using a relatively large number of layers, such as
        (for googlenet) anything matching the regular expression 'inception_../output'. The
        relative weights of the layers are determined automatically."""
        self.ensure_healthy()
        weights = self.prepare_guide_weights(guide_img, layers, max_guide_size)
        return self.dream(input_img, weights, **kwargs)
| [
"numpy.sum",
"numpy.random.seed",
"numpy.abs",
"multiprocessing.get_context",
"pathlib.Path",
"numpy.random.randint",
"numpy.round",
"numpy.prod",
"numpy.zeros_like",
"re.fullmatch",
"OpenEXR.Header",
"numpy.finfo",
"numpy.dstack",
"tqdm.tqdm",
"numpy.roll",
"scipy.ndimage.convolve1d",... | [((886, 913), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (903, 913), False, 'import logging\n'), ((953, 976), 'multiprocessing.get_context', 'mp.get_context', (['"""spawn"""'], {}), "('spawn')\n", (967, 976), True, 'import multiprocessing as mp\n'), ((1077, 1130), 'collections.namedtuple', 'namedtuple', (['"""CNNData"""', '"""deploy model mean categories"""'], {}), "('CNNData', 'deploy model mean categories')\n", (1087, 1130), False, 'from collections import namedtuple, OrderedDict\n'), ((983, 1003), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (991, 1003), True, 'import numpy as np\n'), ((4092, 4107), 'numpy.float32', 'np.float32', (['arr'], {}), '(arr)\n', (4102, 4107), True, 'import numpy as np\n'), ((4650, 4701), 'scipy.ndimage.convolve1d', 'ndimage.convolve1d', (['x', '[-1, 1]'], {'axis': '(2)', 'mode': '"""wrap"""'}), "(x, [-1, 1], axis=2, mode='wrap')\n", (4668, 4701), False, 'from scipy import ndimage\n'), ((4715, 4766), 'scipy.ndimage.convolve1d', 'ndimage.convolve1d', (['x', '[-1, 1]'], {'axis': '(1)', 'mode': '"""wrap"""'}), "(x, [-1, 1], axis=1, mode='wrap')\n", (4733, 4766), False, 'from scipy import ndimage\n'), ((4865, 4887), 'numpy.sum', 'np.sum', (['grad_norm_beta'], {}), '(grad_norm_beta)\n', (4871, 4887), True, 'import numpy as np\n'), ((1212, 1226), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1216, 1226), False, 'from pathlib import Path\n'), ((4275, 4297), 'PIL.Image.fromarray', 'Image.fromarray', (['plane'], {}), '(plane)\n', (4290, 4297), False, 'from PIL import Image\n'), ((4499, 4517), 'numpy.roll', 'np.roll', (['arr', 'x', '(2)'], {}), '(arr, x, 2)\n', (4506, 4517), True, 'import numpy as np\n'), ((6500, 6520), 'caffe.set_mode_cpu', 'caffe.set_mode_cpu', ([], {}), '()\n', (6518, 6520), False, 'import caffe\n'), ((6622, 6646), 'numpy.float32', 'np.float32', (['cnndata.mean'], {}), '(cnndata.mean)\n', (6632, 6646), True, 'import numpy 
as np\n'), ((7042, 7074), 'numpy.zeros_like', 'np.zeros_like', (["self.data['data']"], {}), "(self.data['data'])\n", (7055, 7074), True, 'import numpy as np\n'), ((8748, 8797), 'numpy.dstack', 'np.dstack', (['(img + self.mean[:, None, None])[::-1]'], {}), '((img + self.mean[:, None, None])[::-1])\n', (8757, 8797), True, 'import numpy as np\n'), ((9746, 9759), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9757, 9759), False, 'from collections import namedtuple, OrderedDict\n'), ((10469, 10492), 'numpy.zeros_like', 'np.zeros_like', (['self.img'], {}), '(self.img)\n', (10482, 10492), True, 'import numpy as np\n'), ((11603, 11639), 'numpy.random.seed', 'np.random.seed', (['(self.img.size + seed)'], {}), '(self.img.size + seed)\n', (11617, 11639), True, 'import numpy as np\n'), ((12805, 12842), 'numpy.zeros_like', 'np.zeros_like', (['base'], {'dtype': 'np.float32'}), '(base, dtype=np.float32)\n', (12818, 12842), True, 'import numpy as np\n'), ((14635, 14648), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14646, 14648), False, 'from collections import namedtuple, OrderedDict\n'), ((15459, 15472), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15470, 15472), False, 'from collections import namedtuple, OrderedDict\n'), ((3112, 3124), 'numpy.sign', 'np.sign', (['arr'], {}), '(arr)\n', (3119, 3124), True, 'import numpy as np\n'), ((3318, 3360), 'OpenEXR.Header', 'OpenEXR.Header', (['arr.shape[1]', 'arr.shape[0]'], {}), '(arr.shape[1], arr.shape[0])\n', (3332, 3360), False, 'import OpenEXR\n'), ((3633, 3662), 'tifffile.TiffWriter', 'tifffile.TiffWriter', (['filename'], {}), '(filename)\n', (3652, 3662), False, 'import tifffile\n'), ((4404, 4417), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (4412, 4417), True, 'import numpy as np\n'), ((9284, 9305), 'numpy.float32', 'np.float32', (['input_img'], {}), '(input_img)\n', (9294, 9305), True, 'import numpy as np\n'), ((11689, 11730), 'numpy.random.randint', 'np.random.randint', 
(['(-jitter)', '(jitter + 1)', '(2)'], {}), '(-jitter, jitter + 1, 2)\n', (11706, 11730), True, 'import numpy as np\n'), ((13629, 13657), 're.fullmatch', 're.fullmatch', (['pattern', 'layer'], {}), '(pattern, layer)\n', (13641, 13657), False, 'import re\n'), ((15242, 15299), 'numpy.sum', 'np.sum', (['guide_features[layer]'], {'axis': '(1, 2)', 'keepdims': '(True)'}), '(guide_features[layer], axis=(1, 2), keepdims=True)\n', (15248, 15299), True, 'import numpy as np\n'), ((17210, 17231), 'numpy.float32', 'np.float32', (['input_img'], {}), '(input_img)\n', (17220, 17231), True, 'import numpy as np\n'), ((747, 778), 'tqdm.tqdm.write', 'tqdm.write', (['s'], {'file': 'self.stream'}), '(s, file=self.stream)\n', (757, 778), False, 'from tqdm import tqdm\n'), ((3125, 3136), 'numpy.abs', 'np.abs', (['arr'], {}), '(arr)\n', (3131, 3136), True, 'import numpy as np\n'), ((3993, 4006), 'numpy.round', 'np.round', (['arr'], {}), '(arr)\n', (4001, 4006), True, 'import numpy as np\n'), ((10149, 10252), 'tqdm.tqdm', 'tqdm', ([], {'total': 'self.total_px', 'unit': '"""pix"""', 'unit_scale': '(True)', 'ncols': '(80)', 'dynamic_ncols': '(True)', 'smoothing': '(0.1)'}), "(total=self.total_px, unit='pix', unit_scale=True, ncols=80,\n dynamic_ncols=True, smoothing=0.1)\n", (10153, 10252), False, 'from tqdm import tqdm\n'), ((8558, 8573), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (8568, 8573), True, 'import numpy as np\n'), ((11397, 11421), 'numpy.prod', 'np.prod', (['grad.shape[-2:]'], {}), '(grad.shape[-2:])\n', (11404, 11421), True, 'import numpy as np\n'), ((11901, 11910), 'numpy.abs', 'np.abs', (['g'], {}), '(g)\n', (11907, 11910), True, 'import numpy as np\n'), ((12021, 12038), 'numpy.sign', 'np.sign', (['self.img'], {}), '(self.img)\n', (12028, 12038), True, 'import numpy as np\n'), ((12041, 12065), 'numpy.abs', 'np.abs', (['(self.img / 127.5)'], {}), '(self.img / 127.5)\n', (12047, 12065), True, 'import numpy as np\n'), ((12937, 12957), 'numpy.array', 'np.array', 
(['base.shape'], {}), '(base.shape)\n', (12945, 12957), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn, optimizer
import numpy as np
from paddle import log
from dataloader import BasicDataset
from paddle.io import Dataset
from time import time
from sklearn.metrics import roc_auc_score
import random
import os
from tqdm import tqdm
def UniformSample_original_python(dataset):
    """
    the original impliment of BPR Sampling in LightGCN
    Draws one (user, positive item, negative item) triple per sampled user:
    users are sampled uniformly with replacement, a positive item is drawn
    from the user's interactions, and a negative item is rejection-sampled
    from the items the user has not interacted with.
    :return:
        np.array of shape (num_valid_samples, 3) with rows [user, pos, neg]
    """
    t_start = time()
    dataset: BasicDataset
    n_samples = dataset.trainDataSize
    sampled_users = np.random.randint(0, dataset.n_users, n_samples)
    all_pos = dataset.allPos
    triples = []
    sample_time1 = 0.
    sample_time2 = 0.
    for user in tqdm(sampled_users):
        t0 = time()
        user_pos = all_pos[user]
        if len(user_pos) == 0:
            continue  # user has no training interactions; skip
        sample_time2 += time() - t0
        pos_item = user_pos[np.random.randint(0, len(user_pos))]
        # Rejection-sample until we hit an item the user never interacted with.
        while True:
            neg_item = np.random.randint(0, dataset.m_items)
            if neg_item not in user_pos:
                break
        triples.append([user, pos_item, neg_item])
        sample_time1 += time() - t0
    total = time() - t_start
    return np.array(triples)
def shuffle(*arrays, **kwargs):
    """Shuffle one or more equal-length arrays with a single shared permutation.

    Keyword Args:
        indices (bool): when True, also return the permutation that was applied.

    Returns:
        The shuffled array (single input) or a tuple of shuffled arrays,
        optionally paired with the index permutation.

    Raises:
        ValueError: if the inputs do not all have the same length.
    """
    require_indices = kwargs.get('indices', False)
    lengths = set(len(arr) for arr in arrays)
    if len(lengths) != 1:
        raise ValueError('All inputs to shuffle must have ' 'the same length.')
    perm = np.arange(len(arrays[0]))
    np.random.shuffle(perm)
    if len(arrays) == 1:
        result = arrays[0][perm]
    else:
        result = tuple(arr[perm] for arr in arrays)
    return (result, perm) if require_indices else result
def minibatch(*tensors, **kwargs):
    """Yield consecutive mini-batches from one or more aligned sequences.

    Keyword Args:
        batch_size (int): number of rows per batch (default 128).

    Yields:
        Slices of the single input, or tuples of aligned slices when
        several inputs are given (last batch may be shorter).
    """
    batch_size = kwargs.get('batch_size', 128)
    if len(tensors) == 1:
        data = tensors[0]
        for start in range(0, len(data), batch_size):
            yield data[start:start + batch_size]
    else:
        for start in range(0, len(tensors[0]), batch_size):
            yield tuple(t[start:start + batch_size] for t in tensors)
class timer:
    """
    Time context manager for code block
    with timer():
        do something
    timer.get()

    Named timers (``timer(name=...)``) accumulate elapsed seconds into the
    shared ``NAMED_TAPE`` dict; anonymous timers push elapsed seconds onto
    the shared ``TAPE`` stack (or a user-supplied tape).
    """
    from time import time
    TAPE = [-1]  # global time record
    NAMED_TAPE = {}
    @staticmethod
    def get():
        """Pop and return the most recent anonymous timing, or -1 if none."""
        return timer.TAPE.pop() if len(timer.TAPE) > 1 else -1
    @staticmethod
    def dict(select_keys=None):
        """Format accumulated named timings as '|name:seconds|...'."""
        hint = "|"
        keys = timer.NAMED_TAPE.keys() if select_keys is None else select_keys
        for key in keys:
            hint = hint + f"{key}:{timer.NAMED_TAPE[key]:.2f}|"
        return hint
    @staticmethod
    def zero(select_keys=None):
        """Reset all (or only the selected) named accumulators to zero."""
        keys = timer.NAMED_TAPE.keys() if select_keys is None else select_keys
        for key in keys:
            timer.NAMED_TAPE[key] = 0
    def __init__(self, tape=None, **kwargs):
        name = kwargs.get('name')
        if name:
            # Create the accumulator on first use (or when it is still falsy).
            if not timer.NAMED_TAPE.get(name):
                timer.NAMED_TAPE[name] = 0.
            self.named = name
            if kwargs.get("group"):
                # TODO: add group function
                pass
        else:
            self.named = False
        self.tape = tape or timer.TAPE
    def __enter__(self):
        self.start = timer.time()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = timer.time() - self.start
        if self.named:
            timer.NAMED_TAPE[self.named] += elapsed
        else:
            self.tape.append(elapsed)
# ====================Metrics==============================
# =========================================================
def RecallPrecision_ATk(test_data, r, k):
    """Compute summed recall@k and precision@k over a batch of users.

    Args:
        test_data: list of per-user ground-truth item lists (lengths may differ).
        r: binary hit matrix of shape (test_batch, >=k); assumed pre-sorted so
           column j corresponds to the j-th ranked prediction.
        k: cutoff rank.

    Returns:
        dict with 'recall' and 'precision' summed over the batch.
    """
    hits_at_k = r[:, :k].sum(1)
    precis_n = k
    num_relevant = np.array([len(gt) for gt in test_data])
    recall = np.sum(hits_at_k / num_relevant)
    precis = np.sum(hits_at_k) / precis_n
    return {'recall': recall, 'precision': precis}
def MRRatK_r(r, k):
    """
    Mean Reciprocal Rank at cutoff k, summed over the batch.

    Args:
        r: binary hit matrix of shape (test_batch, >=k), pre-sorted by
           predicted score (column j is the j-th ranked item).
        k: cutoff rank.

    Returns:
        Sum over users of sum_j r[u, j] / (j + 1) for j < k.

    Note:
        The previous implementation divided by ``np.log2(1. / rank)``,
        which equals 0 for rank 1 (division by zero -> inf for any top-1
        hit) and is negative for all later ranks. Reciprocal rank weights
        each hit by 1/rank instead.
    """
    pred_data = r[:, :k]
    scores = 1. / np.arange(1, k + 1)  # reciprocal-rank weights [1, 1/2, ..., 1/k]
    pred_data = pred_data * scores
    pred_data = pred_data.sum(1)
    return np.sum(pred_data)
def NDCGatK_r(test_data, r, k):
    """Normalized Discounted Cumulative Gain at cutoff k, summed over users.

    Binary relevance is assumed (rel_i in {0, 1}, so 2^{rel_i} - 1 = rel_i).

    Args:
        test_data: list of per-user ground-truth item lists.
        r: binary hit matrix (test_batch, >=k), pre-sorted by predicted score.
        k: cutoff rank.
    """
    assert len(r) == len(test_data)
    pred_data = r[:, :k]
    # Ideal ranking: all relevant items (capped at k) placed in the top slots.
    ideal = np.zeros((len(pred_data), k))
    for row, items in enumerate(test_data):
        ideal[row, :min(k, len(items))] = 1
    discounts = 1. / np.log2(np.arange(2, k + 2))
    idcg = np.sum(ideal * discounts, axis=1)
    dcg = np.sum(pred_data * discounts, axis=1)
    idcg[idcg == 0.] = 1.  # avoid 0/0 for users with no relevant items
    ndcg = dcg / idcg
    ndcg[np.isnan(ndcg)] = 0.
    return np.sum(ndcg)
def AUC(all_item_scores, dataset, test_data):
    """
    design for a single user
    ROC-AUC of the score ranking against the user's test items; items with a
    negative score (masked-out entries) are excluded from the computation.
    """
    dataset: BasicDataset
    labels = np.zeros((dataset.m_items, ))
    labels[test_data] = 1
    valid = all_item_scores >= 0
    return roc_auc_score(labels[valid], all_item_scores[valid])
def getLabel(test_data, pred_data):
    """Binary hit matrix: 1.0 where a predicted item is in the ground truth.

    Args:
        test_data: list of per-user ground-truth item lists.
        pred_data: list of per-user ranked predicted item lists.

    Returns:
        float np.array of shape (num_users, k).
    """
    rows = []
    for ground_true, predicted in zip(test_data, pred_data):
        hits = np.array([item in ground_true for item in predicted])
        rows.append(hits.astype("float"))
    return np.array(rows).astype('float')
| [
"tqdm.tqdm",
"numpy.sum",
"numpy.zeros",
"numpy.isnan",
"time.time",
"sklearn.metrics.roc_auc_score",
"numpy.random.randint",
"numpy.array",
"numpy.arange",
"numpy.random.shuffle"
] | [((1042, 1048), 'time.time', 'time', ([], {}), '()\n', (1046, 1048), False, 'from time import time\n'), ((1124, 1171), 'numpy.random.randint', 'np.random.randint', (['(0)', 'dataset.n_users', 'user_num'], {}), '(0, dataset.n_users, user_num)\n', (1141, 1171), True, 'import numpy as np\n'), ((1872, 1883), 'numpy.array', 'np.array', (['S'], {}), '(S)\n', (1880, 1883), True, 'import numpy as np\n'), ((2150, 2184), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_indices'], {}), '(shuffle_indices)\n', (2167, 2184), True, 'import numpy as np\n'), ((5052, 5081), 'numpy.sum', 'np.sum', (['(right_pred / recall_n)'], {}), '(right_pred / recall_n)\n', (5058, 5081), True, 'import numpy as np\n'), ((5390, 5407), 'numpy.sum', 'np.sum', (['pred_data'], {}), '(pred_data)\n', (5396, 5407), True, 'import numpy as np\n'), ((5949, 5968), 'numpy.sum', 'np.sum', (['dcg'], {'axis': '(1)'}), '(dcg, axis=1)\n', (5955, 5968), True, 'import numpy as np\n'), ((6058, 6070), 'numpy.sum', 'np.sum', (['ndcg'], {}), '(ndcg)\n', (6064, 6070), True, 'import numpy as np\n'), ((6206, 6234), 'numpy.zeros', 'np.zeros', (['(dataset.m_items,)'], {}), '((dataset.m_items,))\n', (6214, 6234), True, 'import numpy as np\n'), ((6369, 6403), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['r', 'test_item_scores'], {}), '(r, test_item_scores)\n', (6382, 6403), False, 'from sklearn.metrics import roc_auc_score\n'), ((1285, 1296), 'tqdm.tqdm', 'tqdm', (['users'], {}), '(users)\n', (1289, 1296), False, 'from tqdm import tqdm\n'), ((1315, 1321), 'time.time', 'time', ([], {}), '()\n', (1319, 1321), False, 'from time import time\n'), ((1785, 1791), 'time.time', 'time', ([], {}), '()\n', (1789, 1791), False, 'from time import time\n'), ((1840, 1846), 'time.time', 'time', ([], {}), '()\n', (1844, 1846), False, 'from time import time\n'), ((5095, 5113), 'numpy.sum', 'np.sum', (['right_pred'], {}), '(right_pred)\n', (5101, 5113), True, 'import numpy as np\n'), ((6026, 6040), 'numpy.isnan', 'np.isnan', 
(['ndcg'], {}), '(ndcg)\n', (6034, 6040), True, 'import numpy as np\n'), ((1434, 1440), 'time.time', 'time', ([], {}), '()\n', (1438, 1440), False, 'from time import time\n'), ((1587, 1624), 'numpy.random.randint', 'np.random.randint', (['(0)', 'dataset.m_items'], {}), '(0, dataset.m_items)\n', (1604, 1624), True, 'import numpy as np\n'), ((5290, 5309), 'numpy.arange', 'np.arange', (['(1)', '(k + 1)'], {}), '(1, k + 1)\n', (5299, 5309), True, 'import numpy as np\n'), ((6703, 6714), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (6711, 6714), True, 'import numpy as np\n'), ((5851, 5870), 'numpy.arange', 'np.arange', (['(2)', '(k + 2)'], {}), '(2, k + 2)\n', (5860, 5870), True, 'import numpy as np\n'), ((5917, 5936), 'numpy.arange', 'np.arange', (['(2)', '(k + 2)'], {}), '(2, k + 2)\n', (5926, 5936), True, 'import numpy as np\n'), ((6638, 6652), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (6646, 6652), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for Hourglass model."""
from absl.testing import absltest
from absl.testing import parameterized
import gin
import jax
import numpy as np
from trax import fastmath
from trax import layers as tl
from trax import shapes
import trax.layers.research.resampling as resampling
import trax.models.research.hourglass as hourglass
class HourglassTest(parameterized.TestCase):
  """Shape and causality (autoregressive-property) tests for HourglassLM."""
  def _check_forward_shape(self, model, input_shape, output_vocab_size):
    """Run one forward pass and assert logits shape is (*input_shape, vocab)."""
    x = np.ones(input_shape).astype(np.int32)
    model.init(shapes.signature(x))
    y = model(x)
    self.assertEqual(y.shape, (*input_shape, output_vocab_size))
  def test_hourglass_lm_forward_shape(self):
    """A three-stage hierarchy model emits logits of the expected shape."""
    d_model = 16
    vocab_size = 7
    model = hourglass.HourglassLM(
        vocab_size,
        hierarchy='2@3 2@6 2@3',
        vanilla_layers=(1, 1),
        d_model=d_model,
        d_ff=d_model,
        n_heads=2,
    )
    batch_size, seq_len = 3, 24
    self._check_forward_shape(model,
                              input_shape=(batch_size, seq_len),
                              output_vocab_size=vocab_size)
  def test_lsh_attention_in_vanilla(self):
    """LSH self-attention can be plugged into the vanilla (outer) layers."""
    d_model = 16
    vocab_size = 7
    gin.bind_parameter('PureLSHSelfAttentionWrapper.pure_lsh_implementation',
                       tl.PureLSHSelfAttention)
    gin.bind_parameter('PureLSHSelfAttention.chunk_len', 2)
    model = hourglass.HourglassLM(
        vocab_size,
        hierarchy='2@3',
        vanilla_layers=(1, 1),
        d_model=d_model,
        d_ff=d_model,
        n_heads=2,
        vanilla_attn_type=tl.PureLSHSelfAttentionWrapper,
        downsampling_fn=resampling.LinearPooling,
        upsampling_fn=resampling.LinearUpsampling,
    )
    batch_size, seq_len = 3, 12
    self._check_forward_shape(
        model, input_shape=(batch_size, seq_len), output_vocab_size=vocab_size)
  def _test_autoregressive_property(self, model, input_shape,
                                    output_vocab_size):
    """Check logits at position i are unaffected by inputs at positions > i."""
    rng_1 = jax.random.PRNGKey(0)
    rng_2 = jax.random.PRNGKey(1)
    def _get_output_logits(unitialized_eval_model: tl.Layer, x):
      # Re-initialize with a fixed rng so both inputs see identical weights.
      input_signature = shapes.signature(x)
      unitialized_eval_model.init(input_signature, rng=rng_1, use_cache=False)
      output_logits, *_ = unitialized_eval_model(x, rng=rng_1)
      return output_logits
    def check_autoregressive_property(model):
      with fastmath.use_backend(fastmath.Backend.JAX):
        x_1 = jax.random.randint(rng_1, input_shape, 0, output_vocab_size)
        y_1 = _get_output_logits(model, x_1)
        x_2 = jax.random.randint(rng_2, input_shape, 0, output_vocab_size)
        for i in range(input_shape[1]):
          # Swap in a different suffix starting at position i; a causal model
          # must leave the logits for the unchanged prefix identical.
          masked_x_2 = np.concatenate((x_1[:, :i], x_2[:, i:]), axis=1)
          y_2 = _get_output_logits(model, masked_x_2)
          self.assertEqual(y_2.shape[0], input_shape[1])
          np.testing.assert_array_almost_equal(y_1[:i + 1], y_2[:i + 1])
    check_autoregressive_property(model)
  def test_hourglass_lm_autoregressive_property(self):
    """Both single- and multi-stage hierarchies must remain causal."""
    d_model = 8
    vocab_size = 26
    model_single_stage = hourglass.HourglassLM(
        vocab_size,
        hierarchy='2@4',
        vanilla_layers=(1, 1),
        d_model=d_model,
        d_ff=d_model,
        n_heads=2,
    )
    model_multi_stage = hourglass.HourglassLM(
        vocab_size,
        hierarchy='2@3 2@6 2@3',
        vanilla_layers=(1, 1),
        d_model=d_model,
        d_ff=d_model,
        n_heads=2,
    )
    input_shape = (1, 12)
    self._test_autoregressive_property(model_single_stage, input_shape,
                                       output_vocab_size=vocab_size)
    self._test_autoregressive_property(model_multi_stage, input_shape,
                                       output_vocab_size=vocab_size)
  def test_parse_hourglass_hierarchy(self):
    """Spot-checks for _parse_hierarchy, including rejection of invalid specs."""
    self.assertEqual(hourglass._parse_hierarchy('6@3'), ([6], [3]))
    self.assertEqual(hourglass._parse_hierarchy('3@2 2@6 5@24 2@6 3@2'), (
        [3, 2, 5], [2, 3, 4]
    ))
    self.assertRaises(ValueError, hourglass._parse_hierarchy, '1@2 2@3 1@2')
    self.assertRaises(ValueError, hourglass._parse_hierarchy, '1@2 2@3')
if __name__ == '__main__':
  absltest.main()  # absl test-runner entry point
| [
"absl.testing.absltest.main",
"gin.bind_parameter",
"trax.shapes.signature",
"numpy.ones",
"jax.random.PRNGKey",
"jax.random.randint",
"trax.models.research.hourglass.HourglassLM",
"trax.models.research.hourglass._parse_hierarchy",
"numpy.testing.assert_array_almost_equal",
"trax.fastmath.use_back... | [((4766, 4781), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (4779, 4781), False, 'from absl.testing import absltest\n'), ((1327, 1455), 'trax.models.research.hourglass.HourglassLM', 'hourglass.HourglassLM', (['vocab_size'], {'hierarchy': '"""2@3 2@6 2@3"""', 'vanilla_layers': '(1, 1)', 'd_model': 'd_model', 'd_ff': 'd_model', 'n_heads': '(2)'}), "(vocab_size, hierarchy='2@3 2@6 2@3', vanilla_layers=(\n 1, 1), d_model=d_model, d_ff=d_model, n_heads=2)\n", (1348, 1455), True, 'import trax.models.research.hourglass as hourglass\n'), ((1786, 1888), 'gin.bind_parameter', 'gin.bind_parameter', (['"""PureLSHSelfAttentionWrapper.pure_lsh_implementation"""', 'tl.PureLSHSelfAttention'], {}), "('PureLSHSelfAttentionWrapper.pure_lsh_implementation',\n tl.PureLSHSelfAttention)\n", (1804, 1888), False, 'import gin\n'), ((1912, 1967), 'gin.bind_parameter', 'gin.bind_parameter', (['"""PureLSHSelfAttention.chunk_len"""', '(2)'], {}), "('PureLSHSelfAttention.chunk_len', 2)\n", (1930, 1967), False, 'import gin\n'), ((1981, 2244), 'trax.models.research.hourglass.HourglassLM', 'hourglass.HourglassLM', (['vocab_size'], {'hierarchy': '"""2@3"""', 'vanilla_layers': '(1, 1)', 'd_model': 'd_model', 'd_ff': 'd_model', 'n_heads': '(2)', 'vanilla_attn_type': 'tl.PureLSHSelfAttentionWrapper', 'downsampling_fn': 'resampling.LinearPooling', 'upsampling_fn': 'resampling.LinearUpsampling'}), "(vocab_size, hierarchy='2@3', vanilla_layers=(1, 1),\n d_model=d_model, d_ff=d_model, n_heads=2, vanilla_attn_type=tl.\n PureLSHSelfAttentionWrapper, downsampling_fn=resampling.LinearPooling,\n upsampling_fn=resampling.LinearUpsampling)\n", (2002, 2244), True, 'import trax.models.research.hourglass as hourglass\n'), ((2586, 2607), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (2604, 2607), False, 'import jax\n'), ((2620, 2641), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(1)'], {}), '(1)\n', (2638, 2641), False, 'import jax\n'), ((3678, 
3797), 'trax.models.research.hourglass.HourglassLM', 'hourglass.HourglassLM', (['vocab_size'], {'hierarchy': '"""2@4"""', 'vanilla_layers': '(1, 1)', 'd_model': 'd_model', 'd_ff': 'd_model', 'n_heads': '(2)'}), "(vocab_size, hierarchy='2@4', vanilla_layers=(1, 1),\n d_model=d_model, d_ff=d_model, n_heads=2)\n", (3699, 3797), True, 'import trax.models.research.hourglass as hourglass\n'), ((3874, 4002), 'trax.models.research.hourglass.HourglassLM', 'hourglass.HourglassLM', (['vocab_size'], {'hierarchy': '"""2@3 2@6 2@3"""', 'vanilla_layers': '(1, 1)', 'd_model': 'd_model', 'd_ff': 'd_model', 'n_heads': '(2)'}), "(vocab_size, hierarchy='2@3 2@6 2@3', vanilla_layers=(\n 1, 1), d_model=d_model, d_ff=d_model, n_heads=2)\n", (3895, 4002), True, 'import trax.models.research.hourglass as hourglass\n'), ((1130, 1149), 'trax.shapes.signature', 'shapes.signature', (['x'], {}), '(x)\n', (1146, 1149), False, 'from trax import shapes\n'), ((2732, 2751), 'trax.shapes.signature', 'shapes.signature', (['x'], {}), '(x)\n', (2748, 2751), False, 'from trax import shapes\n'), ((4427, 4460), 'trax.models.research.hourglass._parse_hierarchy', 'hourglass._parse_hierarchy', (['"""6@3"""'], {}), "('6@3')\n", (4453, 4460), True, 'import trax.models.research.hourglass as hourglass\n'), ((4495, 4545), 'trax.models.research.hourglass._parse_hierarchy', 'hourglass._parse_hierarchy', (['"""3@2 2@6 5@24 2@6 3@2"""'], {}), "('3@2 2@6 5@24 2@6 3@2')\n", (4521, 4545), True, 'import trax.models.research.hourglass as hourglass\n'), ((1077, 1097), 'numpy.ones', 'np.ones', (['input_shape'], {}), '(input_shape)\n', (1084, 1097), True, 'import numpy as np\n'), ((2980, 3022), 'trax.fastmath.use_backend', 'fastmath.use_backend', (['fastmath.Backend.JAX'], {}), '(fastmath.Backend.JAX)\n', (3000, 3022), False, 'from trax import fastmath\n'), ((3038, 3098), 'jax.random.randint', 'jax.random.randint', (['rng_1', 'input_shape', '(0)', 'output_vocab_size'], {}), '(rng_1, input_shape, 0, output_vocab_size)\n', 
(3056, 3098), False, 'import jax\n'), ((3159, 3219), 'jax.random.randint', 'jax.random.randint', (['rng_2', 'input_shape', '(0)', 'output_vocab_size'], {}), '(rng_2, input_shape, 0, output_vocab_size)\n', (3177, 3219), False, 'import jax\n'), ((3284, 3332), 'numpy.concatenate', 'np.concatenate', (['(x_1[:, :i], x_2[:, i:])'], {'axis': '(1)'}), '((x_1[:, :i], x_2[:, i:]), axis=1)\n', (3298, 3332), True, 'import numpy as np\n'), ((3455, 3517), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['y_1[:i + 1]', 'y_2[:i + 1]'], {}), '(y_1[:i + 1], y_2[:i + 1])\n', (3491, 3517), True, 'import numpy as np\n')] |
"""Visualize an anisotropic 2-D Gaussian kernel on a regular grid.

Builds a random positive-semi-definite 2x2 bandwidth matrix, evaluates
GaussianKernel([1, W]) centred at a random query point over a 100x100
grid, cross-checks the batched evaluation against the per-point loop,
and plots the resulting kernel surface.
"""
import numpy as np
import matplotlib.pyplot as plt
from kernels import GaussianKernel
# generate the center of the gaussian and the grid
x_q = np.random.uniform(-1,1,2)
x_p = np.meshgrid(np.linspace(-5,5,100),np.linspace(-5,5,100))
# flatten the grid into column vectors of x/y coordinates
x_p_0 = np.reshape(x_p[0],(-1,1))
x_p_1 = np.reshape(x_p[1],(-1,1))
# generate a random 2x2 positive definite matrix with close to orthogonal eigenvectors
a = np.random.random(2)
a = a/np.linalg.norm(a)
b = np.random.random(2)
b = b/np.linalg.norm(b)
b = b - .8*b.dot(a)*a # remove most of b's component along a (partial Gram-Schmidt)
b = b/np.linalg.norm(b)
W = np.outer(a,a) + np.outer(b,b)  # sum of two rank-1 outer products => symmetric PSD
# compute the kernel value over the entire grid
kernel = GaussianKernel([1,W])
K = np.array([kernel.eval(x_q,np.hstack([x_0,x_1])) for x_0, x_1 in zip(x_p_0, x_p_1)])
# test batch evaluation
K_ = kernel.eval_batch(x_q[...,np.newaxis], np.reshape(x_p, (2,-1)))
assert((K == K_).all())
K = np.reshape(K,x_p[0].shape)
# plot the gaussian
plt.pcolor(x_p[0],x_p[1],K)
plt.plot(x_q[0],x_q[1],'*')
plt.show() | [
"matplotlib.pyplot.pcolor",
"numpy.random.uniform",
"numpy.outer",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"kernels.GaussianKernel",
"numpy.hstack",
"numpy.random.random",
"numpy.linalg.norm",
"numpy.reshape",
"numpy.linspace"
] | [((144, 171), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(2)'], {}), '(-1, 1, 2)\n', (161, 171), True, 'import numpy as np\n'), ((241, 268), 'numpy.reshape', 'np.reshape', (['x_p[0]', '(-1, 1)'], {}), '(x_p[0], (-1, 1))\n', (251, 268), True, 'import numpy as np\n'), ((275, 302), 'numpy.reshape', 'np.reshape', (['x_p[1]', '(-1, 1)'], {}), '(x_p[1], (-1, 1))\n', (285, 302), True, 'import numpy as np\n'), ((393, 412), 'numpy.random.random', 'np.random.random', (['(2)'], {}), '(2)\n', (409, 412), True, 'import numpy as np\n'), ((441, 460), 'numpy.random.random', 'np.random.random', (['(2)'], {}), '(2)\n', (457, 460), True, 'import numpy as np\n'), ((657, 679), 'kernels.GaussianKernel', 'GaussianKernel', (['[1, W]'], {}), '([1, W])\n', (671, 679), False, 'from kernels import GaussianKernel\n'), ((890, 917), 'numpy.reshape', 'np.reshape', (['K', 'x_p[0].shape'], {}), '(K, x_p[0].shape)\n', (900, 917), True, 'import numpy as np\n'), ((938, 967), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['x_p[0]', 'x_p[1]', 'K'], {}), '(x_p[0], x_p[1], K)\n', (948, 967), True, 'import matplotlib.pyplot as plt\n'), ((966, 995), 'matplotlib.pyplot.plot', 'plt.plot', (['x_q[0]', 'x_q[1]', '"""*"""'], {}), "(x_q[0], x_q[1], '*')\n", (974, 995), True, 'import matplotlib.pyplot as plt\n'), ((994, 1004), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1002, 1004), True, 'import matplotlib.pyplot as plt\n'), ((188, 211), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (199, 211), True, 'import numpy as np\n'), ((210, 233), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (221, 233), True, 'import numpy as np\n'), ((419, 436), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (433, 436), True, 'import numpy as np\n'), ((467, 484), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (481, 484), True, 'import numpy as np\n'), ((547, 564), 'numpy.linalg.norm', 'np.linalg.norm', 
(['b'], {}), '(b)\n', (561, 564), True, 'import numpy as np\n'), ((569, 583), 'numpy.outer', 'np.outer', (['a', 'a'], {}), '(a, a)\n', (577, 583), True, 'import numpy as np\n'), ((585, 599), 'numpy.outer', 'np.outer', (['b', 'b'], {}), '(b, b)\n', (593, 599), True, 'import numpy as np\n'), ((836, 860), 'numpy.reshape', 'np.reshape', (['x_p', '(2, -1)'], {}), '(x_p, (2, -1))\n', (846, 860), True, 'import numpy as np\n'), ((709, 730), 'numpy.hstack', 'np.hstack', (['[x_0, x_1]'], {}), '([x_0, x_1])\n', (718, 730), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2018 <NAME> <<EMAIL>>
# Distributed under terms of the MIT license.
"""
Apply final FILTER cleanup and QUAL score recalibration
"""
import argparse
import sys
import pysam
import csv
from numpy import median
# Define global variables
# FILTER tags that cleanup_vcf demotes to INFO flags (they are also removed
# from the FILTER column via filts_to_remove below).
filts_for_info = 'PESR_GT_OVERDISPERSION HIGH_SR_BACKGROUND BOTHSIDES_SUPPORT VARIABLE_ACROSS_BATCHES'.split(
    ' ')
# Stale FILTER tags dropped outright from every record.
filts_to_remove = 'HIGH_PCRPLUS_NOCALL_RATE HIGH_PCRMINUS_NOCALL_RATE'.split(
    ' ')
filts_to_remove = filts_to_remove + filts_for_info
# Genotype tuples as seen through pysam: no-call, homozygous-reference, and
# het-like calls. NOTE(review): the (None, 2)/(None, 3) forms presumably
# encode multiallelic/haploid conventions used upstream -- confirm.
NULL_GTs = [(None, None), (None, )]
REF_GTs = [(0, 0), (0, ), (None, 2)]
NULL_and_REF_GTs = NULL_GTs + REF_GTs
HET_GTs = [(0, 1), (None, 1), (None, 3)]
def import_callrates(table_in):
    """
    Import table of variant callrates.

    Args:
        table_in: path to a two-column TSV of (variant ID, callrate).

    Returns:
        dict mapping variant ID -> float callrate; the first occurrence of a
        duplicated ID wins.
    """
    callrates = {}
    with open(table_in) as tsvfile:
        for vid, callrate in csv.reader(tsvfile, delimiter='\t'):
            if vid not in callrates:
                callrates[vid] = float(callrate)
    return callrates
# def get_call_rate(record, samples):
# """
# Get fraction of samples with non-null genotypes
# """
# total_s = [s for s in record.samples if s in samples]
# total = len(total_s)
# nocall_s = [s for s in total_s if record.samples[s]['GT'] in NULL_GTs]
# nocall = len(nocall_s)
# callrate = 1 - ( nocall / total )
# return callrate
def recal_qual_score(record):
    """
    Recalibrate quality score for a single variant.

    QUAL becomes the integer median of per-sample GQs over het carriers,
    with every other non-ref, non-null carrier contributing a fixed 999.
    Returns None (implicitly) when the site has no carriers at all.
    """
    carrier_quals = []
    for sample in record.samples:
        gt = record.samples[sample]['GT']
        if gt in NULL_and_REF_GTs:
            continue  # non-carriers do not inform the site quality
        if gt in HET_GTs:
            carrier_quals.append(record.samples[sample]['GQ'])
        else:
            carrier_quals.append(999)
    if len(carrier_quals) > 0:
        return int(median(carrier_quals))
def cleanup_vcf(vcf, fout, callrates, min_callrate_global=0.85,
                min_callrate_smallDels=0.95):
    """Apply final FILTER cleanup and QUAL recalibration to each record.

    Per record: demotes the `filts_for_info` tags from FILTER to INFO flags,
    strips the `filts_to_remove` tags from FILTER, applies LOW_CALL_RATE
    based on the per-variant `callrates` (stricter threshold for 300bp-1kb
    DELs), recomputes QUAL from carrier GQs, and writes only records with at
    least one non-null, non-ref genotype.

    Args:
        vcf: iterable of input records (pysam.VariantFile).
        fout: output pysam.VariantFile opened for writing.
        callrates: dict of variant ID -> callrate (see import_callrates).
        min_callrate_global: callrate threshold applied to most variants.
        min_callrate_smallDels: stricter threshold for small (300bp-1kb) DELs.
    """
    # minus_samples = [s for s in vcf.header.samples if s not in plus_samples]
    # male_minus_samples = [s for s in minus_samples if s not in male_samples]
    for record in vcf:
        # Move several filters from FILTER to INFO
        for filt in filts_for_info:
            if filt in record.filter:
                record.info[filt] = True
        # Move HIGH_SR_BACKGROUND
        # Remove all HIGH_NOCALL_RATE and HIGH_SR_BACKGROUND tags from FILTER column
        newfilts = [
            filt for filt in record.filter if filt not in filts_to_remove]
        record.filter.clear()
        for filt in newfilts:
            record.filter.add(filt)
        # An empty FILTER column is rendered as PASS explicitly.
        if len(record.filter) == 0:
            record.filter.add('PASS')
        # #Mark sites with low PCR+ call rate
        # plus_callrate = get_call_rate(record, plus_samples)
        # if plus_callrate < min_callrate:
        #     if 'LOW_PCRPLUS_CALL_RATE' not in record.info.keys():
        #         record.info.keys().append('LOW_PCRPLUS_CALL_RATE')
        #     record.info['LOW_PCRPLUS_CALL_RATE'] = True
        # Mark sites with low PCR- call rate
        if record.id in callrates.keys():
            callrate = callrates[record.id]
            # Mark small (300bp-1kb) deletions with stricter 5% null gt rate,
            # and mark all other variants at specified null gt rate
            if record.info['SVTYPE'] == 'DEL' \
                    and record.info['SVLEN'] < 1000 \
                    and record.info['SVLEN'] > 300:
                if callrate < min_callrate_smallDels:
                    record.filter.add('LOW_CALL_RATE')
            else:
                if callrate < min_callrate_global:
                    record.filter.add('LOW_CALL_RATE')
        # Recalibrate QUAL score
        newQUAL = recal_qual_score(record)
        if newQUAL is not None:
            record.qual = newQUAL
        # Only write out non-empty variants to output file
        for s in record.samples:
            if record.samples[s]['GT'] not in NULL_and_REF_GTs:
                fout.write(record)
                break
def main():
    """CLI entry point: parse args, patch the VCF header, run cleanup_vcf."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('vcf', help='Input vcf (supports "stdin").')
    # parser.add_argument('PCRPLUS_samples', help='List of PCRPLUS sample IDs.')
    # parser.add_argument('male_samples', help='List of male sample IDs.')
    parser.add_argument('fout', help='Output file (supports "stdout").')
    parser.add_argument('--callrate-table', help='TSV of variant IDs and ' +
                        'their corresponding callrates.', required=True)
    parser.add_argument('--min-callrate-global', type=float, help='Minimum fraction ' +
                        'of samples required to have non-missing genotypes for ' +
                        'all variants.', default=0.85)
    parser.add_argument('--min-callrate-smallDels', type=float, help='Minimum fraction ' +
                        'of samples required to have non-missing genotypes for ' +
                        'DEL variants between 300bp-1kb.', default=0.95)
    args = parser.parse_args()
    # Open connection to input VCF
    if args.vcf in '- stdin'.split():
        vcf = pysam.VariantFile(sys.stdin)
    else:
        vcf = pysam.VariantFile(args.vcf)
    # Add new FILTER lines to VCF header
    NEW_FILTERS = ['##FILTER=<ID=LOW_CALL_RATE,Description="Site does not meet ' +
                   'minimum requirements for fraction of PCR- samples with non-null ' +
                   'genotypes. Flags sites more prone to false discoveries.">']
    header = vcf.header
    for filt in NEW_FILTERS:
        header.add_line(filt)
    # Remove unused FILTER lines from VCF header
    for filt in filts_to_remove:
        header.filters.remove_header(filt)
    # Add new INFO lines to VCF header
    NEW_INFOS = ['##INFO=<ID=PESR_GT_OVERDISPERSION,Number=0,Type=Flag,Description=' +
                 '"PESR genotyping data is overdispersed. Flags sites where genotypes' +
                 ' are likely noisier.">',
                 '##INFO=<ID=HIGH_SR_BACKGROUND,Number=0,Type=Flag,Description=' +
                 '"Suspicious accumulation of split reads in predicted non-carrier ' +
                 'samples. Flags sites more prone to false discoveries and where ' +
                 'breakpoint precision is reduced.">',
                 '##INFO=<ID=BOTHSIDES_SUPPORT,Number=0,Type=Flag,Description=' +
                 '"Variant has read-level support for both sides of breakpoint. ' +
                 'Indicates higher-confidence variants.">',
                 '##INFO=<ID=VARIABLE_ACROSS_BATCHES,Number=0,Type=Flag,Description=' +
                 '"Site appears at variable frequencies across batches. Accuracy ' +
                 'of allele frequency estimates for these sites may be reduced.">']
    for info in NEW_INFOS:
        header.add_line(info)
    # #Read list of PCR+ samples
    # f_plus_samples = open(args.PCRPLUS_samples, 'r')
    # plus_samples = f_plus_samples.read().splitlines()
    # f_plus_samples.close()
    # #Read list of male samples
    # f_male_samples = open(args.male_samples, 'r')
    # male_samples = f_male_samples.read().splitlines()
    # f_male_samples.close()
    # Read callrates
    callrates = import_callrates(args.callrate_table)
    # Open connection to output VCF
    if args.fout in '- stdout'.split():
        fout = pysam.VariantFile(sys.stdout, 'w', header=vcf.header)
    else:
        fout = pysam.VariantFile(args.fout, 'w', header=vcf.header)
    # Cleanup VCF
    cleanup_vcf(vcf, fout, callrates,
                min_callrate_global=args.min_callrate_global,
                min_callrate_smallDels=args.min_callrate_smallDels,)
    fout.close()
if __name__ == '__main__':
    main()
| [
"pysam.VariantFile",
"csv.reader",
"argparse.ArgumentParser",
"numpy.median"
] | [((4102, 4205), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (4125, 4205), False, 'import argparse\n'), ((872, 907), 'csv.reader', 'csv.reader', (['tsvfile'], {'delimiter': '"""\t"""'}), "(tsvfile, delimiter='\\t')\n", (882, 907), False, 'import csv\n'), ((5259, 5287), 'pysam.VariantFile', 'pysam.VariantFile', (['sys.stdin'], {}), '(sys.stdin)\n', (5276, 5287), False, 'import pysam\n'), ((5312, 5339), 'pysam.VariantFile', 'pysam.VariantFile', (['args.vcf'], {}), '(args.vcf)\n', (5329, 5339), False, 'import pysam\n'), ((7464, 7517), 'pysam.VariantFile', 'pysam.VariantFile', (['sys.stdout', '"""w"""'], {'header': 'vcf.header'}), "(sys.stdout, 'w', header=vcf.header)\n", (7481, 7517), False, 'import pysam\n'), ((7543, 7595), 'pysam.VariantFile', 'pysam.VariantFile', (['args.fout', '"""w"""'], {'header': 'vcf.header'}), "(args.fout, 'w', header=vcf.header)\n", (7560, 7595), False, 'import pysam\n'), ((1843, 1856), 'numpy.median', 'median', (['quals'], {}), '(quals)\n', (1849, 1856), False, 'from numpy import median\n')] |
import numpy as np
import torch
from .layers import PeriodicConv2D
class CircUNet(torch.nn.Module):
    """ Simple UNet module for image-to-image regression

    Assumes image height and width are the same for input and output.
    Note that default constructor uses PeriodicConv2D layers !

    Args:
        in_channels: # of channels of the input image.
        out_channels: # of channels of the regressed output image.
        filters: per-resolution-level lists of conv output widths.
        kernels: per-resolution-level lists of kernel sizes (aligned with filters).
        pooling: spatial down/up-sampling factor between levels.
        activation: element-wise nonlinearity applied after each conv + BN.
        mode: 'circular' for periodic padding (PeriodicConv2D); any other
            value uses zero-padded torch.nn.Conv2d throughout.
    """
    def __init__(self, in_channels, out_channels, filters, kernels, pooling, activation, mode='circular'):
        super(CircUNet, self).__init__()
        # BUGFIX: `np.any(kernels == 2)` compared a list-of-lists to an int,
        # which is always False, so the guard could never fire. Check every
        # kernel size explicitly instead.
        assert not any(k == 2 for ks in kernels for k in ks), 'kernel size 2 not allowed for circular padding'
        ziplist = zip([in_channels] + [f[0] for f in filters[:-1]], filters, kernels)
        # BUGFIX: propagate the requested padding mode (it was previously
        # hard-coded to 'circular', silently ignoring the `mode` argument
        # for all layers except the final projection).
        self.downlayers = [Downscaler(i, f, k, pooling, activation, mode=mode) for i, f, k in ziplist]
        # bottom layer: number of filters actually has to increase
        i, f, k, o = filters[-1][-1], [2*f for f in filters[-1]], kernels[-1], filters[-1][-1]
        self.uplayers = [Upscaler(i, f, k, pooling, o, activation, mode=mode)]
        ziplist = zip([2*f[-1] for f in filters[::-1]], filters[::-1], kernels[::-1], [f[-1] for f in filters[::-1][1:]])
        self.uplayers += [Upscaler(i, f, k, pooling, o, activation, mode=mode) for i, f, k, o in ziplist]
        self.downlayers, self.uplayers = torch.nn.ModuleList(self.downlayers), torch.nn.ModuleList(self.uplayers)
        # Final projection sees the concatenation of the last upscaler output
        # and the first skip tensor, hence 2 * filters[0][0] input channels.
        i, f, k = 2*filters[0][0], out_channels, kernels[0][-1]
        if mode == 'circular':
            self.final = PeriodicConv2D(i, f, k, padding=(k-1, k-1), padding_mode='circular')
        else:
            self.final = torch.nn.Conv2d(i, f, k, padding=k//2)

    def forward(self, x):
        """Encode (collecting skip tensors), decode, project to out_channels."""
        skips = []
        for layer in self.downlayers:
            x, skip = layer(x)
            skips += [skip]
        # Deepest skip pairs with the first upscaler, hence the reversal.
        for layer, skip in zip(self.uplayers, skips[::-1]):
            x = layer(x, skip)
        return self.final(x)
class Downscaler(torch.nn.Module):
    """One UNet encoder stage: conv -> BN -> activation blocks, then max-pool.

    forward returns both the pooled tensor (input to the next stage) and the
    pre-pool activations (the skip connection).
    """
    def __init__(self, in_channels, filters, kernels, pooling, activation, mode='circular'):
        super(Downscaler, self).__init__()
        specs = list(zip([in_channels] + list(filters[:-1]), filters, kernels))
        if mode == 'circular':
            convs = [PeriodicConv2D(cin, cout, k, padding=(k-1, k-1),
                                    padding_mode='circular') for cin, cout, k in specs]
        else:
            convs = [torch.nn.Conv2d(cin, cout, k, padding=k//2) for cin, cout, k in specs]
        self.layers = torch.nn.ModuleList(convs)
        self.pooling = torch.nn.MaxPool2d(pooling)
        self.bns = torch.nn.ModuleList([torch.nn.BatchNorm2d(c) for c in filters])
        self.activation = activation

    def forward(self, x):
        for conv, bn in zip(self.layers, self.bns):
            x = bn(self.activation(conv(x)))
        return self.pooling(x), x
class Upscaler(torch.nn.Module):
    """One UNet decoder stage: conv -> BN -> activation blocks, transposed-conv
    upsampling, then channel-wise concatenation with the encoder skip tensor.
    """
    def __init__(self, in_channels, filters, kernels, pooling, out_channel, activation, mode='circular'):
        super(Upscaler, self).__init__()
        specs = list(zip([in_channels] + list(filters[:-1]), filters, kernels))
        if mode == 'circular':
            convs = [PeriodicConv2D(cin, cout, k, padding=(k-1, k-1),
                                    padding_mode='circular') for cin, cout, k in specs]
        else:
            convs = [torch.nn.Conv2d(cin, cout, k, padding=k//2) for cin, cout, k in specs]
        self.layers = torch.nn.ModuleList(convs)
        self.bns = torch.nn.ModuleList([torch.nn.BatchNorm2d(c) for c in filters])
        self.uplayer = torch.nn.ConvTranspose2d(filters[-1], out_channel, pooling, stride=2)
        self.activation = activation

    def forward(self, x, xskip):
        for conv, bn in zip(self.layers, self.bns):
            x = bn(self.activation(conv(x)))
        x = self.uplayer(x)
        return torch.cat((x, xskip), axis=1)  # Nx(C+Cskip)xHxW
| [
"torch.nn.ConvTranspose2d",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.cat",
"numpy.any",
"torch.nn.BatchNorm2d",
"torch.nn.MaxPool2d"
] | [((2419, 2451), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['self.layers'], {}), '(self.layers)\n', (2438, 2451), False, 'import torch\n'), ((2475, 2502), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['pooling'], {}), '(pooling)\n', (2493, 2502), False, 'import torch\n'), ((3352, 3384), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['self.layers'], {}), '(self.layers)\n', (3371, 3384), False, 'import torch\n'), ((3491, 3560), 'torch.nn.ConvTranspose2d', 'torch.nn.ConvTranspose2d', (['filters[-1]', 'out_channel', 'pooling'], {'stride': '(2)'}), '(filters[-1], out_channel, pooling, stride=2)\n', (3515, 3560), False, 'import torch\n'), ((3787, 3816), 'torch.cat', 'torch.cat', (['(x, xskip)'], {'axis': '(1)'}), '((x, xskip), axis=1)\n', (3796, 3816), False, 'import torch\n'), ((475, 495), 'numpy.any', 'np.any', (['(kernels == 2)'], {}), '(kernels == 2)\n', (481, 495), True, 'import numpy as np\n'), ((1270, 1306), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['self.downlayers'], {}), '(self.downlayers)\n', (1289, 1306), False, 'import torch\n'), ((1308, 1342), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['self.uplayers'], {}), '(self.uplayers)\n', (1327, 1342), False, 'import torch\n'), ((1578, 1618), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['i', 'f', 'k'], {'padding': '(k // 2)'}), '(i, f, k, padding=k // 2)\n', (1593, 1618), False, 'import torch\n'), ((2336, 2376), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['i', 'f', 'k'], {'padding': '(k // 2)'}), '(i, f, k, padding=k // 2)\n', (2351, 2376), False, 'import torch\n'), ((2543, 2566), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['i'], {}), '(i)\n', (2563, 2566), False, 'import torch\n'), ((3269, 3309), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['i', 'f', 'k'], {'padding': '(k // 2)'}), '(i, f, k, padding=k // 2)\n', (3284, 3309), False, 'import torch\n'), ((3425, 3448), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['i'], {}), '(i)\n', (3445, 3448), False, 'import torch\n')] |
import os
# Chained assignment: pins training to GPU 0 and records the id for log lines.
gpuNo=os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Server identifier, only used in the progress print-out.
severNo='gpu'
import tensorflow as tf
from tflearn.layers.conv import global_avg_pool
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.layers import batch_norm, flatten
from tensorflow.contrib.framework import arg_scope
import optimizer
import numpy as np
import os
import time
import argparse
# Command-line configuration for the DenseNet/MNIST experiment.
parser = argparse.ArgumentParser()
parser.add_argument('--T', type=str, default="18_adam", help="identifier of experiment")
parser.add_argument('--growth_k', type=int, default=12, help="growth rate for every layer")
parser.add_argument('--nb_block', type=int, default=2, help="# of dense block + transition layer")
parser.add_argument('--init_learning_rate', type=float, default=0.01, help="initial learning rate")
parser.add_argument('--optimizer_name', type=str, default="adamshiftmoving", help='sgd | adam | amsgrad | adashift')
parser.add_argument('--beta1', type=float, default=0.9, help="beta1 of optimizer")
parser.add_argument('--beta2', type=float, default=0.999, help="beta2 of optimizer")
parser.add_argument('--epsilon', type=float, default=1e-5, help="epsilon of optimizer")
parser.add_argument('--pred_g_op', type=str, default="none", help="pred_g_op of adashift optimizer")
parser.add_argument('--keep_num', type=int, default=20, help="keep_num of adashift optimizer")
parser.add_argument('--dropout_rate', type=float, default=0.2, help="dropout rate")
parser.add_argument('--batch_size', type=int, default=100, help="batch size")
parser.add_argument('--total_epochs', type=int, default=20, help="# of total training epoch")
parser.add_argument('--random_seed', type=int, default=1, help="random seed")
parser.add_argument('--save_epoch', type=int, default=10, help="frequency to save training statistic")
parser.add_argument('--test_span', type=int, default=50, help="step interval for test")
args = parser.parse_args()
# Downloads MNIST on first run; labels are one-hot encoded.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
class_num = 10
total_batch = int(mnist.train.num_examples / args.batch_size)
# NOTE(review): '%3f' (width 3) is probably intended as '%.3f'; left unchanged
# because the directory name is part of the experiment's on-disk layout.
log_dir='./logs/%s_%d_%s_%3f_%.3f_%.3f' % (args.T, args.keep_num, args.pred_g_op, args.init_learning_rate,
                                              args.beta1, args.beta2)
checkpoint_dir='./model/model_%s' % args.T
# Create checkpoint and result directories if they are missing.
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
if not os.path.exists(log_dir+'/result_data'):
    os.makedirs(log_dir+'/result_data')
# Seed both numpy and the TF graph for reproducibility.
np.random.seed(args.random_seed)
tf.set_random_seed(args.random_seed)
def conv_layer(input, filter, kernel, stride=1, layer_name="conv"):
    """2-D convolution with SAME padding, wrapped in its own name scope.

    input: 4-D input tensor; filter: number of output channels;
    kernel: kernel size; stride: stride; layer_name: TF name scope.
    """
    with tf.name_scope(layer_name):
        return tf.layers.conv2d(
            inputs=input,
            filters=filter,
            kernel_size=kernel,
            strides=stride,
            padding='SAME')
def Global_Average_Pooling(x, stride=1):
    """Average over the full spatial extent of `x` via tflearn.

    The `stride` parameter is kept for signature compatibility but has no
    effect: pooling is global, so the stride value does not matter.
    (A tf.layers.average_pooling2d equivalent would need the explicit
    width/height as pool_size.)
    """
    pooled = global_avg_pool(x, name='Global_avg_pooling')
    return pooled
# Note: tflearn's global_avg_pool may additionally require the h5py and curses packages.
def Batch_Normalization(x, training, scope):
    """Batch normalization whose behaviour switches on the `training` placeholder.

    x: input tensor; training: tf.bool placeholder; scope: variable scope name.
    The arg_scope pins shared batch_norm arguments (decay, center/scale,
    in-place moving-average updates). tf.cond picks between two batch_norm
    calls: the first (reuse=None) creates the variables, the second
    (reuse=True) reuses them, so train/eval share the same parameters.
    """
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True) :
        return tf.cond(training,
                       lambda : batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda : batch_norm(inputs=x, is_training=training, reuse=True))
def Drop_out(x, rate, training) :
    """Dropout on `x` with the given drop `rate`; active only while training."""
    dropped = tf.layers.dropout(inputs=x, rate=rate, training=training)
    return dropped
def Relu(x):
    """ReLU activation."""
    activated = tf.nn.relu(x)
    return activated
def Average_pooling(x, pool_size=[2,2], stride=2, padding='VALID'):
    """2-D average pooling over `x`."""
    return tf.layers.average_pooling2d(
        inputs=x, pool_size=pool_size, strides=stride, padding=padding)
def Max_Pooling(x, pool_size=[3,3], stride=2, padding='VALID'):
    """2-D max pooling over `x`."""
    return tf.layers.max_pooling2d(
        inputs=x, pool_size=pool_size, strides=stride, padding=padding)
def Concatenation(layers) :
    """Concatenate a list of feature maps along axis 3 (channels for NHWC)."""
    return tf.concat(values=layers, axis=3)
def Linear(x) :
    """Final fully-connected layer producing `class_num` logits."""
    logits_out = tf.layers.dense(inputs=x, units=class_num, name='linear')
    return logits_out
class DenseNet():
    """DenseNet-style classifier built from dense blocks and transition layers.

    Pipeline: 7x7 conv + max-pool, then `nb_blocks` pairs of
    (dense block, transition layer), a final 32-layer dense block,
    BN/ReLU/global-average-pool, and a linear head of `class_num` logits.
    """
    def __init__(self, x, nb_blocks, filters, training):
        # x: input image tensor; nb_blocks: number of dense-block/transition
        # pairs; filters: growth rate k; training: tf.bool placeholder that
        # toggles batch-norm and dropout behaviour.
        self.nb_blocks = nb_blocks
        self.filters = filters
        self.training = training
        # Building the graph happens here, at construction time.
        self.model = self.Dense_net(x)
    def bottleneck_layer(self, x, scope):
        """DenseNet bottleneck: BN-ReLU-1x1conv(4k) then BN-ReLU-3x3conv(k)."""
        # print(x)
        with tf.name_scope(scope):
            x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
            x = Relu(x)
            # 1x1 conv with 4*k filters reduces computation before the 3x3 conv.
            x = conv_layer(x, filter=4 * self.filters, kernel=[1,1], layer_name=scope+'_conv1')
            x = Drop_out(x, rate=args.dropout_rate, training=self.training)
            x = Batch_Normalization(x, training=self.training, scope=scope+'_batch2')
            x = Relu(x)
            x = conv_layer(x, filter=self.filters, kernel=[3,3], layer_name=scope+'_conv2')
            x = Drop_out(x, rate=args.dropout_rate, training=self.training)
            # print(x)
            return x
    def transition_layer(self, x, scope):
        """Transition between dense blocks: BN-ReLU-1x1conv then 2x2 avg pool."""
        with tf.name_scope(scope):
            x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
            x = Relu(x)
            x = conv_layer(x, filter=self.filters, kernel=[1,1], layer_name=scope+'_conv1')
            x = Drop_out(x, rate=args.dropout_rate, training=self.training)
            # Halve the spatial resolution.
            x = Average_pooling(x, pool_size=[2,2], stride=2)
            return x
    def dense_block(self, input_x, nb_layers, layer_name):
        """Dense block: each bottleneck sees the concatenation of all earlier outputs."""
        with tf.name_scope(layer_name):
            layers_concat = list()
            layers_concat.append(input_x)
            x = self.bottleneck_layer(input_x, scope=layer_name + '_bottleN_' + str(0))
            layers_concat.append(x)
            for i in range(nb_layers - 1):
                # Dense connectivity: concatenate every previous feature map.
                x = Concatenation(layers_concat)
                x = self.bottleneck_layer(x, scope=layer_name + '_bottleN_' + str(i + 1))
                layers_concat.append(x)
            x = Concatenation(layers_concat)
            return x
    def Dense_net(self, input_x):
        """Assemble the full network graph and return the logits tensor."""
        x = conv_layer(input_x, filter=2 * self.filters, kernel=[7,7], stride=2, layer_name='conv0')
        x = Max_Pooling(x, pool_size=[3,3], stride=2)
        for i in range(self.nb_blocks) :
            # 6 -> 12 -> 48
            x = self.dense_block(input_x=x, nb_layers=4, layer_name='dense_'+str(i))
            x = self.transition_layer(x, scope='trans_'+str(i))
        """
        x = self.dense_block(input_x=x, nb_layers=6, layer_name='dense_1')
        x = self.transition_layer(x, scope='trans_1')
        x = self.dense_block(input_x=x, nb_layers=12, layer_name='dense_2')
        x = self.transition_layer(x, scope='trans_2')
        x = self.dense_block(input_x=x, nb_layers=48, layer_name='dense_3')
        x = self.transition_layer(x, scope='trans_3')
        """
        x = self.dense_block(input_x=x, nb_layers=32, layer_name='dense_final')
        # 100 Layer
        x = Batch_Normalization(x, training=self.training, scope='linear_batch')
        x = Relu(x)
        x = Global_Average_Pooling(x)
        x = flatten(x)
        x = Linear(x)
        # x = tf.reshape(x, [-1, 10])
        return x
# --- Graph construction: placeholders, model, loss, optimizer, metrics ---
x = tf.placeholder(tf.float32, shape=[None, 784])
batch_images = tf.reshape(x, [-1, 28, 28, 1])
label = tf.placeholder(tf.float32, shape=[None, 10])
training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
logits = DenseNet(x=batch_images, nb_blocks=args.nb_block, filters=args.growth_k, training=training_flag).model
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits))
optimizer_choise = tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=args.beta1,beta2=args.beta2, epsilon=args.epsilon)
# optimizer_choise = optimizer.AdamShiftN(learning_rate=learning_rate,keep_num=keep_num,beta2=beta2, epsilon=epsilon,pred_g_op=pred_g_op)
train = optimizer_choise.minimize(cost)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# TensorBoard summaries for the training curves.
tf.summary.scalar('loss', cost)
tf.summary.scalar('accuracy', accuracy)
saver = tf.train.Saver(tf.global_variables())
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, intra_op_parallelism_threads=0, inter_op_parallelism_threads=0)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# Resume from an existing checkpoint when available, else initialize fresh.
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    sess.run(tf.global_variables_initializer())
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(log_dir, sess.graph)
global_step = 0
epoch_learning_rate = args.init_learning_rate
# The whole test set is evaluated in one feed.
test_feed_dict = {
    x: mnist.test.images,
    label: mnist.test.labels,
    learning_rate: epoch_learning_rate,
    training_flag : False
}
# Metric matrices (epochs x evaluation points); resume from disk if present.
Test_Acc=np.zeros((args.total_epochs,total_batch//args.test_span+1)) if not os.path.exists(log_dir+'/result_data/Test_Acc.npy') else np.load(log_dir+'/result_data/Test_Acc.npy')
Test_Loss=np.zeros((args.total_epochs,total_batch//args.test_span+1)) if not os.path.exists(log_dir+'/result_data/Test_Loss.npy') else np.load(log_dir+'/result_data/Test_Loss.npy')
Train_Acc=np.zeros((args.total_epochs,total_batch//args.test_span+1)) if not os.path.exists(log_dir+'/result_data/Train_Acc.npy') else np.load(log_dir+'/result_data/Train_Acc.npy')
Train_Loss=np.zeros((args.total_epochs,total_batch//args.test_span+1)) if not os.path.exists(log_dir+'/result_data/Train_Loss.npy') else np.load(log_dir+'/result_data/Train_Loss.npy')
# --- Training loop ---
for epoch in range(args.total_epochs):
    # Step schedule: divide the learning rate by 10 at 50% and 75% of training.
    if epoch == (args.total_epochs * 0.5) or epoch == (args.total_epochs * 0.75):
        epoch_learning_rate = epoch_learning_rate / 10
    for step in range(total_batch):
        batch_x, batch_y = mnist.train.next_batch(args.batch_size)
        train_feed_dict = {
            x: batch_x,
            label: batch_y,
            learning_rate: epoch_learning_rate,
            training_flag : True
        }
        _, train_loss = sess.run([train, cost], feed_dict=train_feed_dict)
        # Periodically evaluate on the full test set and log both curves.
        if step % args.test_span == 0:
            global_step += 100
            # BUG FIX: the original line ended in a stray line-continuation
            # backslash, fusing it with the next statement into a SyntaxError.
            train_summary, train_accuracy = sess.run([merged, accuracy], feed_dict=train_feed_dict)
            test_accuracy,test_loss = sess.run([accuracy,cost], feed_dict=test_feed_dict)
            # Test metrics are not in `merged`, so wrap them in a manual Summary.
            test_summary = tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=test_loss),
                                              tf.Summary.Value(tag='test_accuracy', simple_value=test_accuracy)])
            writer.add_summary(train_summary, global_step=epoch*total_batch+step)
            writer.add_summary(test_summary, global_step=epoch*total_batch+step)
            writer.flush()
            print("[Epoch %d Step %3d/%d]: (%s)(%s_%s_%s)\n Train Loss:%.4f Train Acc:%.4f Test_Loss:%.4f Test Acc:%.4f"%(
                epoch,step,total_batch,time.strftime('%H:%M:%S',time.localtime(time.time())),gpuNo,severNo,args.T,
                train_loss,train_accuracy,test_loss,test_accuracy)
                )
            Test_Acc[epoch,step//args.test_span]=test_accuracy
            Test_Loss[epoch,step//args.test_span]=test_loss
            Train_Acc[epoch,step//args.test_span]=train_accuracy
            Train_Loss[epoch,step//args.test_span]=train_loss
    # Periodic snapshot of the metric arrays.
    if epoch % args.save_epoch == 0:
        np.save(log_dir+'/result_data/Train_Loss.npy',Train_Loss)
        np.save(log_dir+'/result_data/Train_Acc.npy',Train_Acc)
        np.save(log_dir+'/result_data/Test_Loss.npy',Test_Loss)
        np.save(log_dir+'/result_data/Test_Acc.npy',Test_Acc)
# Final save (np.save appends '.npy', so these hit the same files as above).
np.save(log_dir+'/result_data/Train_Loss',Train_Loss)
np.save(log_dir+'/result_data/Train_Acc',Train_Acc)
np.save(log_dir+'/result_data/Test_Loss',Test_Loss)
np.save(log_dir+'/result_data/Test_Acc',Test_Acc)
saver.save(sess=sess, save_path=checkpoint_dir+'/dense.ckpt')
sess.close() | [
"numpy.load",
"numpy.random.seed",
"argparse.ArgumentParser",
"tensorflow.contrib.layers.flatten",
"tensorflow.reshape",
"tensorflow.ConfigProto",
"tensorflow.global_variables",
"tensorflow.layers.max_pooling2d",
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"os.path.e... | [((395, 420), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (418, 420), False, 'import argparse\n'), ((1932, 1985), 'tensorflow.examples.tutorials.mnist.input_data.read_data_sets', 'input_data.read_data_sets', (['"""MNIST_data"""'], {'one_hot': '(True)'}), "('MNIST_data', one_hot=True)\n", (1957, 1985), False, 'from tensorflow.examples.tutorials.mnist import input_data\n'), ((2440, 2472), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (2454, 2472), True, 'import numpy as np\n'), ((2473, 2509), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (2491, 2509), True, 'import tensorflow as tf\n'), ((7458, 7503), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 784]'}), '(tf.float32, shape=[None, 784])\n', (7472, 7503), True, 'import tensorflow as tf\n'), ((7519, 7549), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 28, 28, 1]'], {}), '(x, [-1, 28, 28, 1])\n', (7529, 7549), True, 'import tensorflow as tf\n'), ((7558, 7602), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 10]'}), '(tf.float32, shape=[None, 10])\n', (7572, 7602), True, 'import tensorflow as tf\n'), ((7619, 7642), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (7633, 7642), True, 'import tensorflow as tf\n'), ((7659, 7707), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""learning_rate"""'}), "(tf.float32, name='learning_rate')\n", (7673, 7707), True, 'import tensorflow as tf\n'), ((7933, 8047), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate', 'beta1': 'args.beta1', 'beta2': 'args.beta2', 'epsilon': 'args.epsilon'}), '(learning_rate=learning_rate, beta1=args.beta1, beta2\n =args.beta2, epsilon=args.epsilon)\n', (7955, 8047), True, 'import tensorflow as tf\n'), ((8361, 8392), 'tensorflow.summary.scalar', 
'tf.summary.scalar', (['"""loss"""', 'cost'], {}), "('loss', cost)\n", (8378, 8392), True, 'import tensorflow as tf\n'), ((8393, 8432), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'accuracy'], {}), "('accuracy', accuracy)\n", (8410, 8432), True, 'import tensorflow as tf\n'), ((8490, 8627), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)', 'intra_op_parallelism_threads': '(0)', 'inter_op_parallelism_threads': '(0)'}), '(allow_soft_placement=True, log_device_placement=False,\n intra_op_parallelism_threads=0, inter_op_parallelism_threads=0)\n', (8504, 8627), True, 'import tensorflow as tf\n'), ((8671, 8696), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (8681, 8696), True, 'import tensorflow as tf\n'), ((8705, 8750), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (8734, 8750), True, 'import tensorflow as tf\n'), ((8935, 8957), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (8955, 8957), True, 'import tensorflow as tf\n'), ((8967, 9009), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['log_dir', 'sess.graph'], {}), '(log_dir, sess.graph)\n', (8988, 9009), True, 'import tensorflow as tf\n'), ((12025, 12081), 'numpy.save', 'np.save', (["(log_dir + '/result_data/Train_Loss')", 'Train_Loss'], {}), "(log_dir + '/result_data/Train_Loss', Train_Loss)\n", (12032, 12081), True, 'import numpy as np\n'), ((12079, 12133), 'numpy.save', 'np.save', (["(log_dir + '/result_data/Train_Acc')", 'Train_Acc'], {}), "(log_dir + '/result_data/Train_Acc', Train_Acc)\n", (12086, 12133), True, 'import numpy as np\n'), ((12131, 12185), 'numpy.save', 'np.save', (["(log_dir + '/result_data/Test_Loss')", 'Test_Loss'], {}), "(log_dir + '/result_data/Test_Loss', Test_Loss)\n", (12138, 12185), True, 'import numpy as np\n'), ((12183, 12235), 'numpy.save', 
'np.save', (["(log_dir + '/result_data/Test_Acc')", 'Test_Acc'], {}), "(log_dir + '/result_data/Test_Acc', Test_Acc)\n", (12190, 12235), True, 'import numpy as np\n'), ((2288, 2318), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2302, 2318), False, 'import os\n'), ((2324, 2351), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2335, 2351), False, 'import os\n'), ((2359, 2399), 'os.path.exists', 'os.path.exists', (["(log_dir + '/result_data')"], {}), "(log_dir + '/result_data')\n", (2373, 2399), False, 'import os\n'), ((2403, 2440), 'os.makedirs', 'os.makedirs', (["(log_dir + '/result_data')"], {}), "(log_dir + '/result_data')\n", (2414, 2440), False, 'import os\n'), ((3083, 3128), 'tflearn.layers.conv.global_avg_pool', 'global_avg_pool', (['x'], {'name': '"""Global_avg_pooling"""'}), "(x, name='Global_avg_pooling')\n", (3098, 3128), False, 'from tflearn.layers.conv import global_avg_pool\n'), ((3744, 3801), 'tensorflow.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'x', 'rate': 'rate', 'training': 'training'}), '(inputs=x, rate=rate, training=training)\n', (3761, 3801), True, 'import tensorflow as tf\n'), ((3827, 3840), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (3837, 3840), True, 'import tensorflow as tf\n'), ((3921, 4016), 'tensorflow.layers.average_pooling2d', 'tf.layers.average_pooling2d', ([], {'inputs': 'x', 'pool_size': 'pool_size', 'strides': 'stride', 'padding': 'padding'}), '(inputs=x, pool_size=pool_size, strides=stride,\n padding=padding)\n', (3948, 4016), True, 'import tensorflow as tf\n'), ((4090, 4181), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'x', 'pool_size': 'pool_size', 'strides': 'stride', 'padding': 'padding'}), '(inputs=x, pool_size=pool_size, strides=stride,\n padding=padding)\n', (4113, 4181), True, 'import tensorflow as tf\n'), ((4218, 4243), 'tensorflow.concat', 'tf.concat', (['layers'], {'axis': '(3)'}), 
'(layers, axis=3)\n', (4227, 4243), True, 'import tensorflow as tf\n'), ((4272, 4329), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'x', 'units': 'class_num', 'name': '"""linear"""'}), "(inputs=x, units=class_num, name='linear')\n", (4287, 4329), True, 'import tensorflow as tf\n'), ((7843, 7911), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'label', 'logits': 'logits'}), '(labels=label, logits=logits)\n', (7882, 7911), True, 'import tensorflow as tf\n'), ((8250, 8270), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (8259, 8270), True, 'import tensorflow as tf\n'), ((8272, 8291), 'tensorflow.argmax', 'tf.argmax', (['label', '(1)'], {}), '(label, 1)\n', (8281, 8291), True, 'import tensorflow as tf\n'), ((8319, 8358), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (8326, 8358), True, 'import tensorflow as tf\n'), ((8457, 8478), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (8476, 8478), True, 'import tensorflow as tf\n'), ((8763, 8817), 'tensorflow.train.checkpoint_exists', 'tf.train.checkpoint_exists', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (8789, 8817), True, 'import tensorflow as tf\n'), ((9226, 9290), 'numpy.zeros', 'np.zeros', (['(args.total_epochs, total_batch // args.test_span + 1)'], {}), '((args.total_epochs, total_batch // args.test_span + 1))\n', (9234, 9290), True, 'import numpy as np\n'), ((9352, 9398), 'numpy.load', 'np.load', (["(log_dir + '/result_data/Test_Acc.npy')"], {}), "(log_dir + '/result_data/Test_Acc.npy')\n", (9359, 9398), True, 'import numpy as np\n'), ((9407, 9471), 'numpy.zeros', 'np.zeros', (['(args.total_epochs, total_batch // args.test_span + 1)'], {}), '((args.total_epochs, total_batch // args.test_span + 1))\n', (9415, 9471), True, 'import numpy as np\n'), ((9532, 9579), 'numpy.load', 'np.load', 
(["(log_dir + '/result_data/Test_Loss.npy')"], {}), "(log_dir + '/result_data/Test_Loss.npy')\n", (9539, 9579), True, 'import numpy as np\n'), ((9588, 9652), 'numpy.zeros', 'np.zeros', (['(args.total_epochs, total_batch // args.test_span + 1)'], {}), '((args.total_epochs, total_batch // args.test_span + 1))\n', (9596, 9652), True, 'import numpy as np\n'), ((9713, 9760), 'numpy.load', 'np.load', (["(log_dir + '/result_data/Train_Acc.npy')"], {}), "(log_dir + '/result_data/Train_Acc.npy')\n", (9720, 9760), True, 'import numpy as np\n'), ((9770, 9834), 'numpy.zeros', 'np.zeros', (['(args.total_epochs, total_batch // args.test_span + 1)'], {}), '((args.total_epochs, total_batch // args.test_span + 1))\n', (9778, 9834), True, 'import numpy as np\n'), ((9896, 9944), 'numpy.load', 'np.load', (["(log_dir + '/result_data/Train_Loss.npy')"], {}), "(log_dir + '/result_data/Train_Loss.npy')\n", (9903, 9944), True, 'import numpy as np\n'), ((2589, 2614), 'tensorflow.name_scope', 'tf.name_scope', (['layer_name'], {}), '(layer_name)\n', (2602, 2614), True, 'import tensorflow as tf\n'), ((2634, 2737), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'input', 'filters': 'filter', 'kernel_size': 'kernel', 'strides': 'stride', 'padding': '"""SAME"""'}), "(inputs=input, filters=filter, kernel_size=kernel, strides=\n stride, padding='SAME')\n", (2650, 2737), True, 'import tensorflow as tf\n'), ((3244, 3376), 'tensorflow.contrib.framework.arg_scope', 'arg_scope', (['[batch_norm]'], {'scope': 'scope', 'updates_collections': 'None', 'decay': '(0.9)', 'center': '(True)', 'scale': '(True)', 'zero_debias_moving_mean': '(True)'}), '([batch_norm], scope=scope, updates_collections=None, decay=0.9,\n center=True, scale=True, zero_debias_moving_mean=True)\n', (3253, 3376), False, 'from tensorflow.contrib.framework import arg_scope\n'), ((7363, 7373), 'tensorflow.contrib.layers.flatten', 'flatten', (['x'], {}), '(x)\n', (7370, 7373), False, 'from tensorflow.contrib.layers import 
batch_norm, flatten\n'), ((8890, 8923), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8921, 8923), True, 'import tensorflow as tf\n'), ((9293, 9346), 'os.path.exists', 'os.path.exists', (["(log_dir + '/result_data/Test_Acc.npy')"], {}), "(log_dir + '/result_data/Test_Acc.npy')\n", (9307, 9346), False, 'import os\n'), ((9474, 9528), 'os.path.exists', 'os.path.exists', (["(log_dir + '/result_data/Test_Loss.npy')"], {}), "(log_dir + '/result_data/Test_Loss.npy')\n", (9488, 9528), False, 'import os\n'), ((9655, 9709), 'os.path.exists', 'os.path.exists', (["(log_dir + '/result_data/Train_Acc.npy')"], {}), "(log_dir + '/result_data/Train_Acc.npy')\n", (9669, 9709), False, 'import os\n'), ((9837, 9892), 'os.path.exists', 'os.path.exists', (["(log_dir + '/result_data/Train_Loss.npy')"], {}), "(log_dir + '/result_data/Train_Loss.npy')\n", (9851, 9892), False, 'import os\n'), ((11776, 11836), 'numpy.save', 'np.save', (["(log_dir + '/result_data/Train_Loss.npy')", 'Train_Loss'], {}), "(log_dir + '/result_data/Train_Loss.npy', Train_Loss)\n", (11783, 11836), True, 'import numpy as np\n'), ((11842, 11900), 'numpy.save', 'np.save', (["(log_dir + '/result_data/Train_Acc.npy')", 'Train_Acc'], {}), "(log_dir + '/result_data/Train_Acc.npy', Train_Acc)\n", (11849, 11900), True, 'import numpy as np\n'), ((11906, 11964), 'numpy.save', 'np.save', (["(log_dir + '/result_data/Test_Loss.npy')", 'Test_Loss'], {}), "(log_dir + '/result_data/Test_Loss.npy', Test_Loss)\n", (11913, 11964), True, 'import numpy as np\n'), ((11970, 12026), 'numpy.save', 'np.save', (["(log_dir + '/result_data/Test_Acc.npy')", 'Test_Acc'], {}), "(log_dir + '/result_data/Test_Acc.npy', Test_Acc)\n", (11977, 12026), True, 'import numpy as np\n'), ((4622, 4642), 'tensorflow.name_scope', 'tf.name_scope', (['scope'], {}), '(scope)\n', (4635, 4642), True, 'import tensorflow as tf\n'), ((5307, 5327), 'tensorflow.name_scope', 'tf.name_scope', (['scope'], {}), '(scope)\n', 
(5320, 5327), True, 'import tensorflow as tf\n'), ((5764, 5789), 'tensorflow.name_scope', 'tf.name_scope', (['layer_name'], {}), '(layer_name)\n', (5777, 5789), True, 'import tensorflow as tf\n'), ((3554, 3608), 'tensorflow.contrib.layers.batch_norm', 'batch_norm', ([], {'inputs': 'x', 'is_training': 'training', 'reuse': 'None'}), '(inputs=x, is_training=training, reuse=None)\n', (3564, 3608), False, 'from tensorflow.contrib.layers import batch_norm, flatten\n'), ((3642, 3696), 'tensorflow.contrib.layers.batch_norm', 'batch_norm', ([], {'inputs': 'x', 'is_training': 'training', 'reuse': '(True)'}), '(inputs=x, is_training=training, reuse=True)\n', (3652, 3696), False, 'from tensorflow.contrib.layers import batch_norm, flatten\n'), ((10780, 10837), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""test_loss"""', 'simple_value': 'test_loss'}), "(tag='test_loss', simple_value=test_loss)\n", (10796, 10837), True, 'import tensorflow as tf\n'), ((10885, 10950), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""test_accuracy"""', 'simple_value': 'test_accuracy'}), "(tag='test_accuracy', simple_value=test_accuracy)\n", (10901, 10950), True, 'import tensorflow as tf\n'), ((11354, 11365), 'time.time', 'time.time', ([], {}), '()\n', (11363, 11365), False, 'import time\n')] |
import numpy as np
class AutoDiff():
"""
Forward Mode Implementation of Automatic Differentiation
The class overloads the basic operations, including the unary operation,
and contains some elemental functions
"""
def __init__(self, val, der=1, name="not_specified"):
"""
constructor for AutoDiff class
Initializes AutoDiff object with a value, derivative and name that was passed in
and converts the type of value to numpy array for handling multiple values
converts the type of derivatives to a dictionary for handling multiple variables
INPUT
=======
val: value of the current variable
der: derivative of the current variable
name: name of the current variable
RETURNS
=======
AutoDiff object: self.val, self.der, and self.name
Example:
>>> x = AutoDiff([5,6], [1, 7], "x")
>>> print(x.val, x.der, x.name)
[5 6] {'x': array([1, 7])} x
"""
# Handle several input types of val, including float, int, list and np.ndarray
if isinstance(val, (float, int, np.int32, np.int64, np.float64)):
val = [val]
self.val = np.array(val)
elif isinstance(val, list):
self.val = np.array(val)
elif isinstance(val, np.ndarray):
self.val = val
else:
raise TypeError("Invalid Type for val! ")
# Handle several input types of val, including float, int, list and dict
if type(der) == dict:
self.der = der
elif type(der) == list:
self.der = {name: np.array(der)}
elif isinstance(der, (float, int, np.int64, np.float64)):
self.der = {name: np.array([der] * len(self.val))}
self.name = name
def get_variables(self):
"""
INPUT
=======
None
RETURNS
=======
set of variable names
Example:
>>> x = AutoDiff([5,6], [1, 7], "x")
>>> x.get_variables()
{'x'}
"""
return set(self.der.keys())
"""Basic Operations"""
def __add__(self, other):
"""
Overloads the addition operation
INPUT
=======
other: Scalar or AutoDiff Object
RETURNS
=======
AutoDiff object: A new AutoDiff object which is the result of the addition operation
performed between the AutoDiff object and the argument that was passed in
EXAMPLES
=======
>>> x = AutoDiff(5, 10, "x")
>>> f1 = x + 100
>>> print(f1.val, f1.der)
[105.] {'x': array([10])}
>>> x = AutoDiff([8, 4], [10, 11], 'x')
>>> y = AutoDiff([9, 12], [20, 33], 'y')
>>> f1 = x + y
>>> print(f1.val, f1.der["x"], f1.der["y"])
[17 16] [10 11] [20 33]
"""
temp_der = {}
if isinstance(other, (int, float)):
# Add a scalar to a AutoDiff object
return AutoDiff(self.val + float(other), self.der.copy(), self.name)
elif isinstance(other, AutoDiff):
# Add two AutoDiff objects
var_union = self.get_variables().union(other.get_variables())
temp_val = self.val + other.val
for variable in var_union:
temp_der[variable] = self.der.get(variable, 0) + other.der.get(variable, 0)
return AutoDiff(temp_val, temp_der, self.name)
else:
raise TypeError("Invalid input type!")
def __radd__(self, other):
"""
INPUT
=======
other: Scalar or AutoDiff Object
RETURNS
=======
AutoDiff object: A new AutoDiff object which is the result of the addition operation
performed between the argument that was passed in and the AutoDiff object
EXAMPLES
=======
>>> x = AutoDiff(5, 10, "x")
>>> f1 = 100 + x
>>> print(f1.val, f1.der)
[105.] {'x': array([10])}
>>> x = AutoDiff([8, 4], [10, 11], 'x')
>>> y = AutoDiff([9, 12], [20, 33], 'y')
>>> f1 = y + x
>>> print(f1.val, f1.der["x"], f1.der["y"])
[17 16] [10 11] [20 33]
"""
return self.__add__(other)
def __mul__(self, other):
"""
Overloads the multiplication operation
Inputs: Scalar or AutoDiff Instance
Returns: A new AutoDiff object which is the result of the multiplication operation
performed between the AutoDiff object and the argument that was passed in
INPUT
=======
other: Scalar or AutoDiff Object
RETURNS
=======
AutoDiff object: A new AutoDiff object which is the result of the addition operation
performed between the argument that was passed in and the AutoDiff object
EXAMPLES
=======
>>> x = AutoDiff(5, name="x")
>>> f1 = 100 * x
>>> print(f1.val, f1.der)
[500.] {'x': array([100])}
>>> x = AutoDiff([8, 4], name='x')
>>> y = AutoDiff([9, 12], name='y')
>>> f1 = y * x
>>> print(f1.val, f1.der["x"], f1.der["y"])
[72 48] [ 9 12] [8 4]
"""
temp_der = {}
if isinstance(other, (int, float)):
# Multiply a scalar to a AutoDiff object
for variable in self.get_variables():
temp_der[variable] = self.der[variable] * other
return AutoDiff(self.val * float(other), temp_der, self.name)
elif isinstance(other, AutoDiff):
# Multiply two AutoDiff objects
var_union = self.get_variables().union(other.get_variables())
for variable in var_union:
temp_der[variable] = self.val * other.der.get(variable, 0) + other.val * self.der.get(variable, 0)
return AutoDiff(self.val * other.val, temp_der, self.name)
else:
raise TypeError("Invalid input type!")
def __rmul__(self, other):
"""
INPUT
=======
other: Scalar or AutoDiff Object
RETURNS
=======
AutoDiff object: A new AutoDiff object which is the result of the multiplication operation
performed between the AutoDiff object and the argument that was passed in
EXAMPLES
=======
>>> x = AutoDiff(5, name="x")
>>> f1 = x * 5
>>> print(f1.val, f1.der)
[25.] {'x': array([5])}
>>> x = AutoDiff(5, name="x")
>>> y = AutoDiff(2, name="y")
>>> result = x * y
>>> print(result.val, result.der["x"], result.der["y"])
[10] [2] [5]
"""
return self.__mul__(other)
def __sub__(self, other):
"""
Overloads the subtraction operation
INPUT
=======
other: Scalar or AutoDiff Object
RETURNS
=======
AutoDiff object: A new AutoDiff object which is the result of the subtraction operation
performed between the AutoDiff object and the argument that was passed in
EXAMPLES
=======
>>> x = AutoDiff(5, name="x")
>>> f1 = x - 100
>>> print(f1.val, f1.der)
[-95.] {'x': array([1])}
>>> x = AutoDiff([8, 4], name='x')
>>> y = AutoDiff([9, 12], name="y")
>>> result = x - y
>>> print(result.val, result.der["x"], result.der["y"])
[-1 -8] [1 1] [-1 -1]
"""
temp_der = {}
if isinstance(other, (int, float)):
# Subtract a scalar from a AutoDiff object
return AutoDiff(self.val - float(other), self.der.copy(), self.name)
elif isinstance(other, AutoDiff):
# Subtract two AutoDiff objects
var_union = self.get_variables().union(other.get_variables())
temp_val = self.val - other.val
for variable in var_union:
temp_der[variable] = self.der.get(variable, 0) - other.der.get(variable, 0)
return AutoDiff(temp_val, temp_der, self.name)
else:
raise TypeError("Invalid input type!")
def __rsub__(self, other):
"""
INPUT
=======
other: Scalar or AutoDiff Object
RETURNS
=======
AutoDiff object: A new AutoDiff object which is the result of the subtraction operation
performed between the AutoDiff object and the argument that was passed in
EXAMPLES
=======
>>> x = AutoDiff(5, name="x")
>>> f1 = 100 - x
>>> print(f1.val, f1.der)
[95.] {'x': array([-1])}
>>> x = AutoDiff([8, 4], name='x')
>>> y = AutoDiff([9, 12], name="y")
>>> result = y - x
>>> print(result.val, result.der["x"], result.der["y"])
[1 8] [-1 -1] [1 1]
"""
return -self + other
def __pow__(self, other):
    """
    Overloads the power operation (self ** other).
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    AutoDiff object: scalar exponents use the power rule
    d(v**c) = c * v**(c-1) * v'; AutoDiff exponents use the full rule
    d(v**o) = v**(o-1) * (o * v' + v * ln(v) * o').
    A length-1 AutoDiff exponent is broadcast to self's shape.
    RAISES
    =======
    ValueError: mismatched vector lengths.
    TypeError: unsupported operand type.
    """
    if isinstance(other, (int, float)):
        powered = np.array([float(v) ** other for v in self.val])
        # The v**(c-1) factor is identical for every variable, so build it once.
        base_grad = np.array([float(v) ** (other - 1) for v in self.val])
        grads = {var: other * np.array(base_grad) * self.der[var]
                 for var in self.get_variables()}
        return AutoDiff(powered, grads, self.name)
    if isinstance(other, AutoDiff):
        if len(other.val) == 1:
            # Broadcast a single exponent across all of self's entries.
            other_val = other.val * np.ones(self.val.shape)
        elif len(other.val) != len(self.val):
            raise ValueError("You must have two vectors of the same length to use power on both.")
        else:
            other_val = other.val[:]
        powered = np.array([float(v) ** (o) for v, o in zip(self.val, other_val)])
        base_grad = np.array([float(v) ** (o - 1) for v, o in zip(self.val, other_val)])
        grads = {}
        for var in self.get_variables().union(other.get_variables()):
            grads[var] = base_grad * (other_val * self.der.get(var, 0) +
                                      self.val * np.log(self.val) * other.der.get(var, 0))
        return AutoDiff(powered, grads, self.name)
    raise TypeError("Invalid input type!")
def __rpow__(self, other):
    """
    Right-hand power operation (other ** self).
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    AutoDiff object: scalar bases use d(c**v) = ln(c) * c**v * v';
    AutoDiff bases use d(o**v) = o**(v-1) * (o * v' * ln(o) + v * o').
    A length-1 AutoDiff base is broadcast to self's shape.
    RAISES
    =======
    ValueError: mismatched vector lengths.
    TypeError: unsupported operand type.
    """
    if isinstance(other, (int, float)):
        powered = np.array([other ** float(v) for v in self.val])
        # c**v is reused by every variable's derivative; compute it once.
        base_grad = np.array([other ** float(v) for v in self.val])
        grads = {var: np.log(other) * base_grad * self.der[var]
                 for var in self.get_variables()}
        return AutoDiff(powered, grads, self.name)
    if isinstance(other, AutoDiff):
        if len(other.val) == 1:
            # Broadcast a single base across all of self's entries.
            other_val = other.val * np.ones(self.val.shape)
        elif len(other.val) != len(self.val):
            raise ValueError("You must have two vectors of the same length to use power on both.")
        else:
            other_val = other.val[:]
        powered = np.array([float(o) ** float(v) for v, o in zip(self.val, other_val)])
        base_grad = np.array([float(o) ** (float(v) - 1) for v, o in zip(self.val, other_val)])
        grads = {}
        for var in self.get_variables().union(other.get_variables()):
            grads[var] = base_grad * (other_val * self.der.get(var, 0) * np.log(other_val) +
                                      self.val * other.der.get(var, 0))
        return AutoDiff(powered, grads, self.name)
    raise TypeError("Invalid input type!")
def __truediv__(self, other):
    """
    Overloads division (self / other).
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    AutoDiff object: implemented as multiplication by the reciprocal,
    a / b == a * b**-1, so __mul__ and __pow__ do the real work.
    """
    return self * other ** -1
def __rtruediv__(self, other):
    """
    Right-hand division (other / self).
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    AutoDiff object: implemented as other * self**-1, delegating to
    __pow__ and the (reflected) multiplication operator.
    """
    return other * self ** -1
def __neg__(self):
    """
    Unary minus (-self).
    RETURNS
    =======
    AutoDiff object: both the value and every partial derivative
    have their signs flipped.
    """
    flipped = {var: -self.der.get(var, 0) for var in self.get_variables()}
    return AutoDiff(-self.val, flipped, self.name)
def __eq__(self, other):
    """
    Overloads the equality operator (==).
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    bool: for a scalar, True iff self holds exactly one value equal to it;
    for an AutoDiff, True iff both value arrays are element-for-element
    identical (same length, same values).
    NotImplemented for any other operand type, letting Python try the
    reflected comparison.
    """
    if isinstance(other, (int, float)):
        return np.array_equal(self.val, np.array([float(other)]))
    elif isinstance(other, AutoDiff):
        return np.array_equal(self.val, other.val)
    # Fix: previously fell through and returned None for unsupported
    # operand types; NotImplemented is the correct rich-comparison fallback.
    return NotImplemented
def __ne__(self, other):
    """
    Overloads the inequality operator (!=).
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    bool: logical negation of __eq__ for scalars and AutoDiff operands.
    NotImplemented for any other operand type, letting Python try the
    reflected comparison.
    """
    if isinstance(other, (int, float)):
        return not np.array_equal(self.val, np.array([float(other)]))
    elif isinstance(other, AutoDiff):
        return not np.array_equal(self.val, other.val)
    # Fix: previously fell through and returned None for unsupported
    # operand types; NotImplemented is the correct rich-comparison fallback.
    return NotImplemented
def __lt__(self, other):
    """
    Overloads the less-than operator (<), elementwise.
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    numpy bool array: truth value of (self.val < other) per element.
    RAISES
    =======
    TypeError: when the operands do not hold the same number of values.
    """
    if isinstance(other, (int, float)):
        if len(self.val) != 1:
            raise TypeError("Please compare the variables with same number of values!")
        return np.less(self.val, np.array([float(other)]))
    elif isinstance(other, AutoDiff):
        if len(self.val) != len(other.val):
            raise TypeError("Please compare the variables with same number of values!")
        return np.less(self.val, other.val)
    # Fix: previously returned None implicitly for unsupported operand
    # types; NotImplemented yields Python's standard TypeError instead.
    return NotImplemented
def __le__(self, other):
    """
    Overloads the less-than-or-equal operator (<=), elementwise.
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    numpy bool array: truth value of (self.val <= other) per element.
    RAISES
    =======
    TypeError: when the operands do not hold the same number of values.
    """
    if isinstance(other, (int, float)):
        if len(self.val) != 1:
            raise TypeError("Please compare the variables with same number of values!")
        return np.less_equal(self.val, np.array([float(other)]))
    elif isinstance(other, AutoDiff):
        if len(self.val) != len(other.val):
            raise TypeError("Please compare the variables with same number of values!")
        return np.less_equal(self.val, other.val)
    # Fix: previously returned None implicitly for unsupported operand
    # types; NotImplemented yields Python's standard TypeError instead.
    return NotImplemented
def __gt__(self, other):
    """
    Overloads the greater-than operator (>), elementwise.
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    numpy bool array: truth value of (self.val > other) per element.
    RAISES
    =======
    TypeError: when the operands do not hold the same number of values.
    """
    if isinstance(other, (int, float)):
        if len(self.val) != 1:
            raise TypeError("Please compare the variables with same number of values!")
        return np.greater(self.val, np.array([float(other)]))
    elif isinstance(other, AutoDiff):
        if len(self.val) != len(other.val):
            raise TypeError("Please compare the variables with same number of values!")
        return np.greater(self.val, other.val)
    # Fix: previously returned None implicitly for unsupported operand
    # types; NotImplemented yields Python's standard TypeError instead.
    return NotImplemented
def __ge__(self, other):
    """
    Overloads the greater-than-or-equal operator (>=), elementwise.
    INPUT
    =======
    other: Scalar or AutoDiff Object
    RETURNS
    =======
    numpy bool array: truth value of (self.val >= other) per element.
    RAISES
    =======
    TypeError: when the operands do not hold the same number of values.
    """
    if isinstance(other, (int, float)):
        if len(self.val) != 1:
            raise TypeError("Please compare the variables with same number of values!")
        return np.greater_equal(self.val, np.array([float(other)]))
    elif isinstance(other, AutoDiff):
        if len(self.val) != len(other.val):
            raise TypeError("Please compare the variables with same number of values!")
        return np.greater_equal(self.val, other.val)
    # Fix: previously returned None implicitly for unsupported operand
    # types; NotImplemented yields Python's standard TypeError instead.
    return NotImplemented
"""Elemental Function"""
def sin(self):
    """
    Elementwise sine.
    RETURNS
    =======
    AutoDiff object: value sin(v), derivatives via the chain rule
    d sin(u) = cos(u) * u'.
    """
    grads = {var: np.cos(self.val) * self.der[var]
             for var in self.get_variables()}
    return AutoDiff(np.sin(self.val), grads, self.name)
def sinh(self):
    """
    Elementwise hyperbolic sine.
    RETURNS
    =======
    AutoDiff object: value sinh(v), derivatives via the chain rule
    d sinh(u) = cosh(u) * u'.
    """
    grads = {var: np.cosh(self.val) * self.der[var]
             for var in self.get_variables()}
    return AutoDiff(np.sinh(self.val), grads, self.name)
def cos(self):
    """
    Elementwise cosine.
    RETURNS
    =======
    AutoDiff object: value cos(v), derivatives via the chain rule
    d cos(u) = -sin(u) * u'.
    """
    grads = {var: -np.sin(self.val) * self.der[var]
             for var in self.get_variables()}
    return AutoDiff(np.cos(self.val), grads, self.name)
def cosh(self):
    """
    Elementwise hyperbolic cosine.
    RETURNS
    =======
    AutoDiff object: value cosh(v), derivatives via the chain rule
    d cosh(u) = sinh(u) * u'.
    """
    grads = {var: np.sinh(self.val) * self.der[var]
             for var in self.get_variables()}
    return AutoDiff(np.cosh(self.val), grads, self.name)
def tan(self):
    """
    Elementwise tangent.
    RETURNS
    =======
    AutoDiff object: value tan(v), derivatives via the chain rule
    d tan(u) = u' / cos(u)**2.
    """
    grads = {var: self.der[var] / (np.cos(self.val) ** 2)
             for var in self.get_variables()}
    return AutoDiff(np.tan(self.val), grads, self.name)
def tanh(self):
    """
    Elementwise hyperbolic tangent.
    RETURNS
    =======
    AutoDiff object: value tanh(v), derivatives via the chain rule
    d tanh(u) = u' / cosh(u)**2.
    """
    grads = {var: self.der[var] * 1 / (np.cosh(self.val) ** 2)
             for var in self.get_variables()}
    return AutoDiff(np.tanh(self.val), grads, self.name)
def arcsin(self):
    """
    Elementwise inverse sine.
    RETURNS
    =======
    AutoDiff object: value arcsin(v), derivatives via the chain rule
    d arcsin(u) = u' / sqrt(1 - u**2).
    """
    grads = {var: self.der[var] * 1 / np.sqrt(1 - self.val ** 2)
             for var in self.get_variables()}
    return AutoDiff(np.arcsin(self.val), grads, self.name)
def arccos(self):
    """
    Elementwise inverse cosine.
    RETURNS
    =======
    AutoDiff object: value arccos(v), derivatives via the chain rule
    d arccos(u) = -u' / sqrt(1 - u**2).
    """
    grads = {var: -self.der[var] * 1 / np.sqrt(1 - self.val ** 2)
             for var in self.get_variables()}
    return AutoDiff(np.arccos(self.val), grads, self.name)
def arctan(self):
    """
    Elementwise inverse tangent.
    RETURNS
    =======
    AutoDiff object: value arctan(v), derivatives via the chain rule
    d arctan(u) = u' / (u**2 + 1).
    """
    grads = {var: self.der[var] * 1 / ((self.val ** 2) + 1)
             for var in self.get_variables()}
    return AutoDiff(np.arctan(self.val), grads, self.name)
def sqrt(self):
    """
    Elementwise square root.
    RETURNS
    =======
    AutoDiff object: value v**(1/2), derivatives via the chain rule
    d sqrt(u) = u' * (1/2) * u**(-1/2).
    """
    grads = {var: self.der[var] * ((1 / 2) * (self.val ** (- 1 / 2)))
             for var in self.get_variables()}
    return AutoDiff(self.val ** (1 / 2), grads, self.name)
def ln(self):
    """
    Elementwise natural logarithm.
    RETURNS
    =======
    AutoDiff object: value ln(v), derivatives via the chain rule
    d ln(u) = u' / u.
    """
    grads = {var: self.der[var] * (1 / self.val)
             for var in self.get_variables()}
    return AutoDiff(np.log(self.val), grads, self.name)
def log(self, base):
    """
    Elementwise logarithm with an arbitrary scalar base.
    INPUT
    =======
    base: scalar base of the logarithm
    RETURNS
    =======
    AutoDiff object: value ln(v)/ln(base) (change-of-base formula),
    derivatives via d log_b(u) = u' / (u * ln(b)).
    """
    grads = {var: self.der[var] * (1 / (self.val * np.log(base)))
             for var in self.get_variables()}
    return AutoDiff(np.log(self.val) / np.log(base), grads, self.name)
def exp(self):
    """
    Elementwise natural exponential.
    RETURNS
    =======
    AutoDiff object: value exp(v), derivatives via the chain rule
    d exp(u) = exp(u) * u'.
    """
    grads = {var: self.der[var] * np.exp(self.val)
             for var in self.get_variables()}
    return AutoDiff(np.exp(self.val), grads, self.name)
def exp_base(self, base):
    """
    Elementwise exponential with an arbitrary scalar base.
    INPUT
    =======
    base: scalar base
    RETURNS
    =======
    AutoDiff object: value base**v, derivatives via the chain rule
    d(b**u) = b**u * ln(b) * u'.
    """
    grads = {var: self.der[var] * (base ** self.val) * np.log(base)
             for var in self.get_variables()}
    return AutoDiff(np.array([base ** float(v) for v in self.val]), grads, self.name)
def logistic(self):
    """
    Elementwise standard logistic (sigmoid) function.
    RETURNS
    =======
    AutoDiff object: value 1 / (1 + exp(-v)), derivatives via
    d sigma(u) = exp(u) / (1 + exp(u))**2 * u'.
    """
    grads = {var: self.der[var] * np.exp(self.val) / ((1 + np.exp(self.val)) ** 2)
             for var in self.get_variables()}
    return AutoDiff(1 / (1 + np.exp(-self.val)), grads, self.name)
| [
"numpy.greater",
"numpy.ones",
"numpy.sin",
"numpy.exp",
"numpy.arcsin",
"numpy.tan",
"numpy.less",
"numpy.arccos",
"numpy.less_equal",
"numpy.tanh",
"numpy.cos",
"numpy.cosh",
"numpy.arctan",
"numpy.greater_equal",
"numpy.log",
"numpy.array",
"numpy.array_equal",
"numpy.sinh",
"... | [((22182, 22198), 'numpy.sin', 'np.sin', (['self.val'], {}), '(self.val)\n', (22188, 22198), True, 'import numpy as np\n'), ((22884, 22901), 'numpy.sinh', 'np.sinh', (['self.val'], {}), '(self.val)\n', (22891, 22901), True, 'import numpy as np\n'), ((23585, 23601), 'numpy.cos', 'np.cos', (['self.val'], {}), '(self.val)\n', (23591, 23601), True, 'import numpy as np\n'), ((24312, 24329), 'numpy.cosh', 'np.cosh', (['self.val'], {}), '(self.val)\n', (24319, 24329), True, 'import numpy as np\n'), ((25014, 25030), 'numpy.tan', 'np.tan', (['self.val'], {}), '(self.val)\n', (25020, 25030), True, 'import numpy as np\n'), ((25730, 25747), 'numpy.tanh', 'np.tanh', (['self.val'], {}), '(self.val)\n', (25737, 25747), True, 'import numpy as np\n'), ((26431, 26450), 'numpy.arcsin', 'np.arcsin', (['self.val'], {}), '(self.val)\n', (26440, 26450), True, 'import numpy as np\n'), ((27139, 27158), 'numpy.arccos', 'np.arccos', (['self.val'], {}), '(self.val)\n', (27148, 27158), True, 'import numpy as np\n'), ((27839, 27858), 'numpy.arctan', 'np.arctan', (['self.val'], {}), '(self.val)\n', (27848, 27858), True, 'import numpy as np\n'), ((29216, 29232), 'numpy.log', 'np.log', (['self.val'], {}), '(self.val)\n', (29222, 29232), True, 'import numpy as np\n'), ((30661, 30677), 'numpy.exp', 'np.exp', (['self.val'], {}), '(self.val)\n', (30667, 30677), True, 'import numpy as np\n'), ((1223, 1236), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (1231, 1236), True, 'import numpy as np\n'), ((29926, 29942), 'numpy.log', 'np.log', (['self.val'], {}), '(self.val)\n', (29932, 29942), True, 'import numpy as np\n'), ((29945, 29957), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (29951, 29957), True, 'import numpy as np\n'), ((1296, 1309), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (1304, 1309), True, 'import numpy as np\n'), ((16470, 16505), 'numpy.array_equal', 'np.array_equal', (['self.val', 'other.val'], {}), '(self.val, other.val)\n', (16484, 16505), True, 'import 
numpy as np\n'), ((18533, 18561), 'numpy.less', 'np.less', (['self.val', 'other.val'], {}), '(self.val, other.val)\n', (18540, 18561), True, 'import numpy as np\n'), ((19570, 19604), 'numpy.less_equal', 'np.less_equal', (['self.val', 'other.val'], {}), '(self.val, other.val)\n', (19583, 19604), True, 'import numpy as np\n'), ((20593, 20624), 'numpy.greater', 'np.greater', (['self.val', 'other.val'], {}), '(self.val, other.val)\n', (20603, 20624), True, 'import numpy as np\n'), ((21635, 21672), 'numpy.greater_equal', 'np.greater_equal', (['self.val', 'other.val'], {}), '(self.val, other.val)\n', (21651, 21672), True, 'import numpy as np\n'), ((22278, 22294), 'numpy.cos', 'np.cos', (['self.val'], {}), '(self.val)\n', (22284, 22294), True, 'import numpy as np\n'), ((23003, 23020), 'numpy.cosh', 'np.cosh', (['self.val'], {}), '(self.val)\n', (23010, 23020), True, 'import numpy as np\n'), ((24431, 24448), 'numpy.sinh', 'np.sinh', (['self.val'], {}), '(self.val)\n', (24438, 24448), True, 'import numpy as np\n'), ((26577, 26603), 'numpy.sqrt', 'np.sqrt', (['(1 - self.val ** 2)'], {}), '(1 - self.val ** 2)\n', (26584, 26603), True, 'import numpy as np\n'), ((27286, 27312), 'numpy.sqrt', 'np.sqrt', (['(1 - self.val ** 2)'], {}), '(1 - self.val ** 2)\n', (27293, 27312), True, 'import numpy as np\n'), ((30800, 30816), 'numpy.exp', 'np.exp', (['self.val'], {}), '(self.val)\n', (30806, 30816), True, 'import numpy as np\n'), ((31590, 31602), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (31596, 31602), True, 'import numpy as np\n'), ((32095, 32112), 'numpy.exp', 'np.exp', (['(-self.val)'], {}), '(-self.val)\n', (32101, 32112), True, 'import numpy as np\n'), ((1648, 1661), 'numpy.array', 'np.array', (['der'], {}), '(der)\n', (1656, 1661), True, 'import numpy as np\n'), ((17511, 17546), 'numpy.array_equal', 'np.array_equal', (['self.val', 'other.val'], {}), '(self.val, other.val)\n', (17525, 17546), True, 'import numpy as np\n'), ((23704, 23720), 'numpy.sin', 'np.sin', 
(['self.val'], {}), '(self.val)\n', (23710, 23720), True, 'import numpy as np\n'), ((25154, 25170), 'numpy.cos', 'np.cos', (['self.val'], {}), '(self.val)\n', (25160, 25170), True, 'import numpy as np\n'), ((25875, 25892), 'numpy.cosh', 'np.cosh', (['self.val'], {}), '(self.val)\n', (25882, 25892), True, 'import numpy as np\n'), ((32236, 32252), 'numpy.exp', 'np.exp', (['self.val'], {}), '(self.val)\n', (32242, 32252), True, 'import numpy as np\n'), ((10040, 10058), 'numpy.array', 'np.array', (['curr_val'], {}), '(curr_val)\n', (10048, 10058), True, 'import numpy as np\n'), ((10325, 10348), 'numpy.ones', 'np.ones', (['self.val.shape'], {}), '(self.val.shape)\n', (10332, 10348), True, 'import numpy as np\n'), ((12276, 12289), 'numpy.log', 'np.log', (['other'], {}), '(other)\n', (12282, 12289), True, 'import numpy as np\n'), ((12499, 12522), 'numpy.ones', 'np.ones', (['self.val.shape'], {}), '(self.val.shape)\n', (12506, 12522), True, 'import numpy as np\n'), ((30097, 30109), 'numpy.log', 'np.log', (['base'], {}), '(base)\n', (30103, 30109), True, 'import numpy as np\n'), ((32261, 32277), 'numpy.exp', 'np.exp', (['self.val'], {}), '(self.val)\n', (32267, 32277), True, 'import numpy as np\n'), ((13133, 13150), 'numpy.log', 'np.log', (['other_val'], {}), '(other_val)\n', (13139, 13150), True, 'import numpy as np\n'), ((11007, 11023), 'numpy.log', 'np.log', (['self.val'], {}), '(self.val)\n', (11013, 11023), True, 'import numpy as np\n')] |
import os.path as osp
import sys
import torch
import torch.utils.data as data
import cv2
import random
import numpy as np
from utils.util import gaussian2D, HRSC_CLASSES, DOTA_CLASSES, tricube_kernel
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
def distance(p1, p2):
    """Return the Euclidean distance between two 2-D points (x, y)."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return np.sqrt(dx ** 2 + dy ** 2)
# Side length (in pixels) of the shared weighting template below.
diameter = 400
# Tricube-kernel template that gaussian() warps onto each rotated box.
gaussian_map = tricube_kernel(diameter, 7)
# Corner coordinates of the template (TL, BL, BR, TR order); used as the
# source quadrilateral of the perspective transform in gaussian().
gaussian_poly = np.float32([[0, 0], [0, diameter], [diameter, diameter], [diameter, 0]])
def gaussian(mask, area, box, size, label):
    """Splat one rotated box onto the per-class heatmap and weight map.

    Args:
        mask: (H, W, num_classes) float heatmap, updated in place.
        area: (H, W, num_classes) float weight map, updated in place.
        box: 8-vector (x1, y1, ..., x4, y4) of the box corners in map pixels.
        size: scalar (or (h, w) tuple) reference area used to normalize
            the per-box weight.
        label: class channel index to write into.

    Returns:
        The (mask, area) pair (same arrays, mutated in place).
    """
    if type(size) is tuple:
        size = size[0] * size[1]
    H, W = mask.shape[:2]
    x1, y1, x2, y2, x3, y3, x4, y4 = box
    # Skip boxes with any negative coordinate (product < 0 implies an odd
    # number of negative factors); such boxes fall outside the map.
    if x1*x2*x3*x4*y1*y2*y3*y4 < 0:
        return mask, area
    # Box extent: the longer of each pair of opposite edges.
    mask_w = max(distance([x1, y1], [x2, y2]), distance([x3, y3], [x4, y4]))
    mask_h = max(distance([x3, y3], [x2, y2]), distance([x1, y1], [x4, y4]))
    if mask_w > 0 and mask_h > 0:
        weight_mask = np.zeros((H, W), dtype=np.float32)
        mask_area = max(1, mask_w * mask_h)
        img_area = size
        # Warp the square kernel template onto the rotated box footprint.
        M = cv2.getPerspectiveTransform(gaussian_poly, box.reshape((4, 2)))
        dst = cv2.warpPerspective(gaussian_map, M, (H, W), flags=cv2.INTER_LINEAR)
        # Weight is inversely proportional to the box area (small objects
        # get larger weights), normalized by the reference area.
        mask_area = (img_area/mask_area)
        weight_mask = cv2.fillPoly(weight_mask, box.astype(np.int32).reshape((-1,4,2)), color=mask_area)
        # Elementwise max keeps the strongest response where boxes overlap.
        mask[:, :, label] = np.maximum(mask[:, :, label], dst)
        area[:, :, label] = np.maximum(area[:, :, label], weight_mask)
    return mask, area
class HRSCAnnotationTransform(object):
    """Transforms an HRSC2016 XML annotation into a list of rotated boxes.

    Arguments:
        class_to_ind (dict, optional): lookup of class names -> indexes
            (default: alphabetic indexing over HRSC_CLASSES).
        keep_difficult (bool, optional): keep difficult instances or not
            (default: True).
    """
    def __init__(self, class_to_ind=None, keep_difficult=True):
        self.class_to_ind = class_to_ind or dict(
            zip(HRSC_CLASSES, range(len(HRSC_CLASSES))))
        self.keep_difficult = keep_difficult

    def __call__(self, target, width, height):
        """
        Arguments:
            target (ET.Element): parsed annotation root element.
            width (int): image width used to normalize x coordinates.
            height (int): image height used to normalize y coordinates.
        Returns:
            list of [x1, y1, x2, y2, x3, y3, x4, y4, label] rows with
            coordinates normalized to [0, 1] (label is always 0 here).
        """
        res = []
        # Bug fix: the original wrapped this loop in
        # `for objs in target.iter('HRSC_Objects')` while still iterating
        # `target.iter('HRSC_Object')` on the WHOLE tree, so every object was
        # appended once per <HRSC_Objects> container, duplicating boxes when
        # more than one container exists. One pass over the objects suffices.
        for obj in target.iter('HRSC_Object'):
            difficult = int(obj.find('difficult').text) == 1
            if not self.keep_difficult and difficult:
                continue
            # label = HRSC_CLASSES.index(obj.find('Class_ID').text)
            cx = float(obj.find('mbox_cx').text)
            cy = float(obj.find('mbox_cy').text)
            w = float(obj.find('mbox_w').text)
            h = float(obj.find('mbox_h').text)
            # Annotation angle is in radians; cv2 expects degrees.
            ang = float(obj.find('mbox_ang').text) * 180 / np.pi
            # Axis-aligned corners, then rotate them about the center.
            box = np.array([[cx-w/2, cy-h/2], [cx-w/2, cy+h/2],
                            [cx+w/2, cy+h/2], [cx+w/2, cy-h/2]], dtype=np.float32)
            M = cv2.getRotationMatrix2D((cx, cy), -ang, 1.0)
            box = np.hstack((box, np.ones((box.shape[0], 1))))
            rbox = np.dot(M, box.T).T
            rbox = rbox.reshape(-1)
            rbb = [rbox[0]/width, rbox[1]/height,
                   rbox[2]/width, rbox[3]/height,
                   rbox[4]/width, rbox[5]/height,
                   rbox[6]/width, rbox[7]/height,
                   0]
            res += [rbb]
        return res  # [[x1, y1, ..., x4, y4, label], ...]
class ListDataset(data.Dataset):
    """Rotated-box detection dataset for DOTA / HRSC2016.

    In training mode, __getitem__ yields (img, mask, area, total_size)
    tensors; with evaluation=True it yields (img, img_path, target).

    Arguments:
        root (string): filepath to the dataset root directory.
        dataset (string): 'HRSC2016' or 'DOTA'.
        out_size (tuple): (H, W) of the generated target maps.
        mode (string): 'train' or 'test'.
        split (string): split suffix (DOTA only; replaced internally).
        transform (callable, optional): joint image/box augmentation.
        evaluation (bool): return raw annotations instead of target maps.
    """
    def __init__(self, root, dataset, out_size, mode, split, transform=None, evaluation=False):
        self.root = root
        self.out_size = out_size
        self.dataset = dataset
        self.mode = mode
        self.split = split
        self.transform = transform
        self.evaluation = evaluation
        self.ids = list()
        if self.dataset == 'HRSC2016':
            # HRSC2016 uses capitalized directory names.
            if self.mode == 'train':
                self.mode = 'Train'
            elif self.mode == 'test':
                self.mode = 'Test'
            self.load_HRSC2016_dataset()
            self.num_classes = 1
        elif self.dataset == 'DOTA':
            if self.mode == 'train':
                self.split = '1024_triple'
            else:
                self.split = '1024_single'
            self.mode = "%s_%s" % (self.mode, self.split)
            self.load_DOTA_dataset()
            self.num_classes = 15
        else:
            # Bug fix: `raise "..."` is a TypeError in Python 3 (only
            # BaseException subclasses can be raised); raise a real exception.
            raise ValueError("only support [DOTA, HRSC2016]")
        # Avoid OpenCV's internal thread pool inside DataLoader workers.
        cv2.setNumThreads(0)

    def load_HRSC2016_dataset(self):
        """Populate self.ids and path templates for HRSC2016."""
        if self.mode == 'Train':
            image_sets = 'trainval'
        else:
            image_sets = 'test'
        self.target_transform = HRSCAnnotationTransform()
        rootpath = osp.join(self.root, 'HRSC2016')
        self._annopath = osp.join(rootpath, self.mode, 'Annotations', '%s.xml')
        self._voc_imgpath = osp.join(rootpath, self.mode, 'AllImages', '%s.bmp')
        # Bug fix: close the image-set file instead of leaking the handle.
        with open(osp.join(rootpath, 'ImageSets', image_sets + '.txt')) as f:
            for line in f:
                self.ids.append(line.strip())

    def load_DOTA_dataset(self):
        """Populate self.ids and path templates for DOTA."""
        self.target_transform = None
        self._anno_path = osp.join(self.root, "DOTA", self.mode, 'labelTxt', '%s.txt')
        self._coco_imgpath = osp.join(self.root, 'DOTA', self.mode, 'images', '%s.png')
        dataset_list = osp.join(self.root, "DOTA", self.mode, "img_list.txt")
        # Bug fix: close the list file instead of leaking the handle.
        with open(dataset_list, "r") as f:
            for line in f.read().splitlines():
                self.ids.append(line)
        self.ids = sorted(self.ids)

    def get_target(self, img_id):
        """Load one image and its raw annotation.

        Returns:
            (target, img_path, img): annotation rows (or [] for DOTA test),
            path to the image, and the RGB image array.
        """
        if self.dataset == 'HRSC2016':
            target = ET.parse(self._annopath % img_id).getroot()
            img_path = self._voc_imgpath % img_id
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        elif self.dataset == 'DOTA':
            img_path = self._coco_imgpath % (img_id)
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            # DOTA tiles are square; normalize coordinates by the side length.
            size = img.shape[0]
            if 'test' in self.mode:
                return [], img_path, img
            # Bug fix: close the annotation file instead of leaking the handle.
            with open(self._anno_path % img_id, "r") as f:
                anno = f.read().splitlines()
            target = []
            for _anno in anno:
                _anno = _anno.split(" ")
                if (len(_anno) < 9):
                    continue
                # if int(_anno[9]) == 1: # ignore difficult
                #     continue
                target.append(
                    [float(_anno[0])/size, float(_anno[1])/size,
                     float(_anno[2])/size, float(_anno[3])/size,
                     float(_anno[4])/size, float(_anno[5])/size,
                     float(_anno[6])/size, float(_anno[7])/size,
                     DOTA_CLASSES.index(_anno[8])]
                )
        else:
            # Bug fix: raise a real exception (see __init__).
            raise ValueError("only support [DOTA, HRSC2016]")
        return target, img_path, img

    def __getitem__(self, index):
        data_id = self.ids[index]
        target, img_path, img = self.get_target(data_id)
        height, width, channels = img.shape
        if self.target_transform is not None:
            target = self.target_transform(target, width, height)
        if self.evaluation:  # evaluation mode: raw annotations only
            return img, img_path, target
        mask = np.zeros((self.out_size[0], self.out_size[1], self.num_classes), dtype=np.float32)
        area = np.zeros((self.out_size[0], self.out_size[1], self.num_classes), dtype=np.float32)
        target = np.array(target)
        boxes = target[:, :8] if target.shape[0] != 0 else None
        labels = target[:, 8] if target.shape[0] != 0 else None
        img, boxes, labels = self.transform(img, boxes, labels)
        total_size = 1
        if boxes is not None:
            # Scale normalized coords to output-map pixels.
            target_wh = np.array([self.out_size[1], self.out_size[0]], dtype=np.float32)
            boxes = (boxes.clip(0, 1) * np.tile(target_wh, 4)).astype(np.float32)
            labels = labels.astype(np.int32)
            numobj = max(len(boxes), 1)
            total_size = self.sum_of_size(boxes)
            # Splat each box; weight is normalized by the mean box size.
            for box, label in zip(boxes, labels):
                mask, area = gaussian(mask, area, box, total_size/numobj, label)
        img = torch.from_numpy(img.astype(np.float32)).permute(2, 0, 1)
        mask = torch.from_numpy(mask.astype(np.float32))
        area = torch.from_numpy(area.astype(np.float32))
        total_size = torch.from_numpy(np.array([total_size], dtype=np.float32))
        return img, mask, area, total_size

    def __len__(self):
        return len(self.ids)

    def sum_of_size(self, boxes):
        """Sum of w*h over all valid boxes (boxes with any negative
        coordinate are skipped, matching gaussian())."""
        size_sum = 0
        for (x1, y1, x2, y2, x3, y3, x4, y4) in boxes:
            if x1*x2*x3*x4*y1*y2*y3*y4 < 0:
                continue
            mask_w = max(distance([x1, y1], [x2, y2]), distance([x3, y3], [x4, y4]))
            mask_h = max(distance([x3, y3], [x2, y2]), distance([x1, y1], [x4, y4]))
            size_sum = size_sum + mask_w*mask_h
        return size_sum
| [
"xml.etree.ElementTree.parse",
"cv2.warpPerspective",
"numpy.maximum",
"cv2.cvtColor",
"numpy.float32",
"utils.util.DOTA_CLASSES.index",
"numpy.zeros",
"numpy.ones",
"cv2.imread",
"numpy.array",
"numpy.tile",
"cv2.setNumThreads",
"utils.util.tricube_kernel",
"numpy.dot",
"os.path.join",
... | [((451, 478), 'utils.util.tricube_kernel', 'tricube_kernel', (['diameter', '(7)'], {}), '(diameter, 7)\n', (465, 478), False, 'from utils.util import gaussian2D, HRSC_CLASSES, DOTA_CLASSES, tricube_kernel\n'), ((495, 567), 'numpy.float32', 'np.float32', (['[[0, 0], [0, diameter], [diameter, diameter], [diameter, 0]]'], {}), '([[0, 0], [0, diameter], [diameter, diameter], [diameter, 0]])\n', (505, 567), True, 'import numpy as np\n'), ((387, 427), 'numpy.sqrt', 'np.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (394, 427), True, 'import numpy as np\n'), ((1043, 1077), 'numpy.zeros', 'np.zeros', (['(H, W)'], {'dtype': 'np.float32'}), '((H, W), dtype=np.float32)\n', (1051, 1077), True, 'import numpy as np\n'), ((1238, 1306), 'cv2.warpPerspective', 'cv2.warpPerspective', (['gaussian_map', 'M', '(H, W)'], {'flags': 'cv2.INTER_LINEAR'}), '(gaussian_map, M, (H, W), flags=cv2.INTER_LINEAR)\n', (1257, 1306), False, 'import cv2\n'), ((1484, 1518), 'numpy.maximum', 'np.maximum', (['mask[:, :, label]', 'dst'], {}), '(mask[:, :, label], dst)\n', (1494, 1518), True, 'import numpy as np\n'), ((1547, 1589), 'numpy.maximum', 'np.maximum', (['area[:, :, label]', 'weight_mask'], {}), '(area[:, :, label], weight_mask)\n', (1557, 1589), True, 'import numpy as np\n'), ((5716, 5736), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (5733, 5736), False, 'import cv2\n'), ((5966, 5997), 'os.path.join', 'osp.join', (['self.root', '"""HRSC2016"""'], {}), "(self.root, 'HRSC2016')\n", (5974, 5997), True, 'import os.path as osp\n'), ((6032, 6086), 'os.path.join', 'osp.join', (['rootpath', 'self.mode', '"""Annotations"""', '"""%s.xml"""'], {}), "(rootpath, self.mode, 'Annotations', '%s.xml')\n", (6040, 6086), True, 'import os.path as osp\n'), ((6115, 6167), 'os.path.join', 'osp.join', (['rootpath', 'self.mode', '"""AllImages"""', '"""%s.bmp"""'], {}), "(rootpath, self.mode, 'AllImages', '%s.bmp')\n", (6123, 6167), True, 'import 
os.path as osp\n'), ((6430, 6490), 'os.path.join', 'osp.join', (['self.root', '"""DOTA"""', 'self.mode', '"""labelTxt"""', '"""%s.txt"""'], {}), "(self.root, 'DOTA', self.mode, 'labelTxt', '%s.txt')\n", (6438, 6490), True, 'import os.path as osp\n'), ((6520, 6578), 'os.path.join', 'osp.join', (['self.root', '"""DOTA"""', 'self.mode', '"""images"""', '"""%s.png"""'], {}), "(self.root, 'DOTA', self.mode, 'images', '%s.png')\n", (6528, 6578), True, 'import os.path as osp\n'), ((6602, 6656), 'os.path.join', 'osp.join', (['self.root', '"""DOTA"""', 'self.mode', '"""img_list.txt"""'], {}), "(self.root, 'DOTA', self.mode, 'img_list.txt')\n", (6610, 6656), True, 'import os.path as osp\n'), ((8806, 8893), 'numpy.zeros', 'np.zeros', (['(self.out_size[0], self.out_size[1], self.num_classes)'], {'dtype': 'np.float32'}), '((self.out_size[0], self.out_size[1], self.num_classes), dtype=np.\n float32)\n', (8814, 8893), True, 'import numpy as np\n'), ((8904, 8991), 'numpy.zeros', 'np.zeros', (['(self.out_size[0], self.out_size[1], self.num_classes)'], {'dtype': 'np.float32'}), '((self.out_size[0], self.out_size[1], self.num_classes), dtype=np.\n float32)\n', (8912, 8991), True, 'import numpy as np\n'), ((9005, 9021), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (9013, 9021), True, 'import numpy as np\n'), ((6202, 6254), 'os.path.join', 'osp.join', (['rootpath', '"""ImageSets"""', "(image_sets + '.txt')"], {}), "(rootpath, 'ImageSets', image_sets + '.txt')\n", (6210, 6254), True, 'import os.path as osp\n'), ((7139, 7159), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (7149, 7159), False, 'import cv2\n'), ((7178, 7214), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (7190, 7214), False, 'import cv2\n'), ((9306, 9370), 'numpy.array', 'np.array', (['[self.out_size[1], self.out_size[0]]'], {'dtype': 'np.float32'}), '([self.out_size[1], self.out_size[0]], dtype=np.float32)\n', (9314, 9370), True, 'import 
numpy as np\n'), ((9970, 10010), 'numpy.array', 'np.array', (['[total_size]'], {'dtype': 'np.float32'}), '([total_size], dtype=np.float32)\n', (9978, 10010), True, 'import numpy as np\n'), ((3319, 3456), 'numpy.array', 'np.array', (['[[cx - w / 2, cy - h / 2], [cx - w / 2, cy + h / 2], [cx + w / 2, cy + h / \n 2], [cx + w / 2, cy - h / 2]]'], {'dtype': 'np.float32'}), '([[cx - w / 2, cy - h / 2], [cx - w / 2, cy + h / 2], [cx + w / 2, \n cy + h / 2], [cx + w / 2, cy - h / 2]], dtype=np.float32)\n', (3327, 3456), True, 'import numpy as np\n'), ((3490, 3534), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cx, cy)', '(-ang)', '(1.0)'], {}), '((cx, cy), -ang, 1.0)\n', (3513, 3534), False, 'import cv2\n'), ((7350, 7370), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (7360, 7370), False, 'import cv2\n'), ((7389, 7425), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (7401, 7425), False, 'import cv2\n'), ((3624, 3640), 'numpy.dot', 'np.dot', (['M', 'box.T'], {}), '(M, box.T)\n', (3630, 3640), True, 'import numpy as np\n'), ((7014, 7047), 'xml.etree.ElementTree.parse', 'ET.parse', (['(self._annopath % img_id)'], {}), '(self._annopath % img_id)\n', (7022, 7047), True, 'import xml.etree.ElementTree as ET\n'), ((3573, 3599), 'numpy.ones', 'np.ones', (['(box.shape[0], 1)'], {}), '((box.shape[0], 1))\n', (3580, 3599), True, 'import numpy as np\n'), ((9411, 9432), 'numpy.tile', 'np.tile', (['target_wh', '(4)'], {}), '(target_wh, 4)\n', (9418, 9432), True, 'import numpy as np\n'), ((8229, 8257), 'utils.util.DOTA_CLASSES.index', 'DOTA_CLASSES.index', (['_anno[8]'], {}), '(_anno[8])\n', (8247, 8257), False, 'from utils.util import gaussian2D, HRSC_CLASSES, DOTA_CLASSES, tricube_kernel\n')] |
#!/usr/bin/env python3
import numpy
size = 1000
randf = lambda n: numpy.random.randint(100, size=n)
x = randf(size).astype(numpy.float64)
y = randf(size).astype(numpy.float64)
result = x + y
def print_array(name, data, data_type='data_t', data_fmt='{}', fold=10):
    """Print *data* as a C static-array initializer named *name*.

    Values are emitted *fold* per line, each rendered through the
    *data_fmt* format string; the declared element type is *data_type*.
    """
    header = 'static {} {}[DATA_SIZE] = {{'.format(data_type, name)
    print(header)
    for start in range(0, len(data), fold):
        chunk = data[start:start + fold]
        row = ', '.join(data_fmt.format(value) for value in chunk)
        print('  ' + row + ',')
    print('};')
# Emit the C header to stdout: include guard, size macro, element type,
# and the three data arrays (inputs plus expected output).
print('''\
#ifndef _DATASET_H
#define _DATASET_H
''')
print('#define DATA_SIZE {}'.format(size))
# Silence -Wunused-variable for translation units that only use some arrays.
print('''
typedef double data_t;
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
''')
print_array('input1_data', x)
print_array('input2_data', y)
print_array('verify_data', result)
# Close the diagnostic push and the include guard.
print('''
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#endif /* _DATASET_H */''')
| [
"numpy.random.randint"
] | [((69, 102), 'numpy.random.randint', 'numpy.random.randint', (['(100)'], {'size': 'n'}), '(100, size=n)\n', (89, 102), False, 'import numpy\n')] |
import os
import tensorflow as tf
import numpy as np
from PIL import Image
tf.compat.v1.enable_eager_execution()
class Layers:
    """A small DenseNet-inspired image classifier with hand-managed weights.

    The 12 weight tensors are plain ``tf.Variable`` objects kept in
    ``self.weights``, trained eagerly with ``tf.GradientTape`` and Adam,
    and persisted as a single ``.npy`` file in ``./weights`` (optionally
    together with the best loss value seen so far).
    """

    def __init__(self, num_classes, learning_rate, save_model_name='weights.npy', weights_file=None):
        """Create or load the network's weight tensors.

        num_classes: number of output units of the final dense layer.
        learning_rate: Adam learning rate.
        save_model_name: file name (under ./weights) used when saving.
        weights_file: optional existing file (under ./weights) to load from.
        """
        # NOTE(review): self.initializer is never referenced again in this
        # class; the local `initializer` created below is what actually
        # initializes the weights. Confirm whether this attribute is needed.
        self.initializer = tf.initializers.glorot_uniform()
        self.num_classes = num_classes
        # Set adam optimizer
        self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
        self.model_name = 'A'
        self.save_model_name = save_model_name
        if weights_file:
            weight_info = np.load(os.path.join('weights', weights_file), allow_pickle=True)
            # If best loss value in the numpy array, save it as a class variable along with weights
            # Else just save the weights
            # (13 items == 12 weight arrays + 1 trailing best-loss scalar)
            if len(weight_info) == 13:
                self.weights, self.best_loss = weight_info[:12], weight_info[12]
            else:
                self.weights = weight_info
                self.best_loss = float('inf')
        else:
            # set weights initializer
            initializer = tf.compat.v1.keras.initializers.glorot_uniform()
            # Shapes of weight arrays of each layer
            # (conv kernels are [kh, kw, in_channels, out_channels];
            # the last two entries are the dense-layer matrices)
            shapes = [
                [7,7,3,64] ,
                [1,1,64,128] ,
                [3,3,128,32] ,
                [1,1,96,128] ,
                [3,3,128,32] ,
                [1,1,128,128] ,
                [3,3,128,32] ,
                [1,1,160,128] ,
                [3,3,128,32] ,
                [1,1,192,32] ,
                [93312 ,1024] ,
                [1024, self.num_classes] ,
            ]
            self.weights = []
            for i in range( len( shapes ) ):
                self.weights.append(self.get_weight(initializer, shapes[i], 'weight{}'.format(i)))
            self.best_loss = float('inf')
    # returns the weights after initializing with random distribution
    def get_weight( self, initializer, shape , name ):
        """Create one trainable float32 tf.Variable of the given shape."""
        return tf.Variable(initializer(shape),name=name,trainable=True,dtype=tf.float32)
    # Save weights at model name
    def save_weights(self, explore):
        """Persist self.weights to ./weights/<save_model_name>.

        When not exploring and the loss improved, the best loss is stored
        as a 13th element alongside the 12 weight arrays; otherwise only
        the weights are written.
        """
        if 'weights' not in os.listdir('.'):
            os.mkdir('weights')
        if not explore and self.update_loss():
            weight_info = self.weights + [self.best_loss]
            np.save(os.path.join('weights',self.save_model_name), weight_info)
        else:
            weight_info = self.weights
            np.save(os.path.join('weights',self.save_model_name), weight_info)
    # If current loss is better than best loss, update
    def update_loss(self):
        """Return True (and record it) if current loss beats the best loss."""
        if self.current_loss < self.best_loss:
            print('loss improved by %f, saving weights into %s' % (self.best_loss - self.current_loss, self.save_model_name))
            self.best_loss = self.current_loss
            return True
        return False
    # Return convolution layer
    def conv2d(self, inputs, filters, strides, padding='VALID'):
        """2-D convolution followed by leaky_relu.

        NOTE(review): alpha=1 makes leaky_relu the identity function, so
        this convolution is effectively un-activated — confirm intended.
        """
        output = tf.nn.conv2d(inputs, filters, [1, strides, strides, 1], padding=padding)
        return tf.nn.leaky_relu(output, alpha=1)
    # Return MaxPool2D layer
    def maxpool(self, inputs, pool_size, strides, padding='VALID'):
        """Max pooling over spatial dimensions."""
        return tf.nn.max_pool2d(inputs, ksize=pool_size, padding=padding, strides=[1, strides, strides, 1])
    # Return AveragePool2D layer
    def avgpool(self, inputs, pool_size, strides, padding='VALID'):
        """Average pooling over spatial dimensions."""
        return tf.nn.avg_pool2d(inputs, ksize=pool_size, padding=padding, strides=[1, strides, strides, 1])
    # Return dense layer
    def dense(self, inputs, weights, dropout_rate):
        """Fully-connected layer (leaky_relu with alpha=1, i.e. linear),
        with optional dropout when dropout_rate != 0."""
        x = tf.nn.leaky_relu(tf.matmul(inputs, weights), alpha=1)
        if dropout_rate!=0:
            return tf.nn.dropout(x, rate=dropout_rate)
        return x
    def predict(self, x):
        """Forward pass: 4 dense blocks with concat skip connections, then
        downsampling and two dense layers.

        Returns the final dense-layer output for input batch *x*.
        NOTE(review): despite the "Return Softmax value" comment at the end,
        no softmax is applied here — the raw layer output is returned.
        """
        # This model draws several logical concepts of Densenet Architecture
        # Input tensor of size 224*224  (assumed; shape is not checked here — TODO confirm)
        input_tensor = tf.cast(x, dtype=tf.float32)
        # Initial Convolution layer of filter size 7*7 and strides 2
        initial_conv = self.conv2d(input_tensor, self.weights[0], strides=2)
        # Max Pooling layer of filter size 2
        max_pooling_initial = self.maxpool(initial_conv, pool_size=2, strides=1)
        # Activation layer
        batch_1_activ = tf.nn.relu(max_pooling_initial)
        # Convolution layer of k*4 number of filters with filter size (1,1)
        batch_1_conv2d_1 = self.conv2d(batch_1_activ, self.weights[1], strides=1, padding='SAME')
        # Dropout Layer
        batch_1_drop = tf.nn.dropout(batch_1_conv2d_1, rate=0.4)
        # Convolution Layer of k number of filters with filter size (3,3)
        batch_1_conv2d_2 = self.conv2d(batch_1_drop, self.weights[2], strides=1, padding='SAME')
        # Concatenate the the first and second block
        batch_2 = tf.concat([max_pooling_initial, batch_1_conv2d_2], axis=3)
        # Activation layer
        batch_2_activ = tf.nn.relu(batch_2)
        # Convolution layer
        batch_2_conv2d_1 = self.conv2d(batch_2_activ, self.weights[3], strides=1, padding='SAME')
        # Dropout Layer
        batch_2_drop = tf.nn.dropout(batch_2_conv2d_1, rate=0.4)
        # Convolution Layer
        batch_2_conv2d_2 = self.conv2d(batch_2_drop, self.weights[4], strides=1, padding='SAME')
        # Concatenate the the first and second block
        batch_3 = tf.concat([batch_2, batch_2_conv2d_2], axis=3)
        # print('batch_3', batch_3.shape)
        # Activation layer
        batch_3_activ = tf.nn.relu(batch_3)
        # Convolution layer
        batch_3_conv2d_1 = self.conv2d(batch_3_activ, self.weights[5], strides=1, padding='SAME')
        # Dropout Layer
        batch_3_drop = tf.nn.dropout(batch_3_conv2d_1, rate=0.4)
        # Convolution Layer
        batch_3_conv2d_2 = self.conv2d(batch_3_drop, self.weights[6], strides=1, padding='SAME')
        # Concatenate the the first and second block
        batch_4 = tf.concat([batch_3, batch_3_conv2d_2], axis=3)
        # Activation layer
        batch_4_activ = tf.nn.relu(batch_4)
        # Convolution layer
        batch_4_conv2d_1 = self.conv2d(batch_4_activ, self.weights[7], strides=1, padding='SAME')
        # Dropout Layer
        batch_4_drop = tf.nn.dropout(batch_4_conv2d_1, rate=0.4)
        # Convolution Layer
        batch_4_conv2d_2 = self.conv2d(batch_4_drop, self.weights[8], strides=1, padding='SAME')
        # Concatenate the the first and second block
        final_batch = tf.concat([batch_4, batch_4_conv2d_2], axis=3)
        # Downsampling Activation Layer
        downsampling_activ = tf.nn.relu(final_batch)
        # Downsampling Convolution Layer
        downsampling_conv2d_1 = self.conv2d(downsampling_activ, self.weights[9], strides=1, padding='VALID')
        # Average Pooling Layer
        downsampling_average = self.avgpool(downsampling_conv2d_1, pool_size=2, strides=2)
        # Flatten Layer
        flatten = tf.reshape(downsampling_average, shape=(tf.shape(downsampling_average)[0], -1))
        # Dense Layer of 1024 units
        top_layer_dense_1 = self.dense(flatten, self.weights[10], dropout_rate=0.5)
        # Dense Layer of num_classes units
        top_layer_dense_2 = self.dense(top_layer_dense_1, self.weights[11], dropout_rate=0)
        # Return Softmax value
        return top_layer_dense_2
    # Returns the loss of the current training step
    def loss(self, pred , target, regularization_parameter):
        """MSE between pred and target plus L2 regularization of all weights."""
        # Sum l2 loss value of parameter for regularization
        regularizer = tf.nn.l2_loss(self.weights[0])
        for weight_index in range(1, len(self.weights)):
            regularizer += tf.nn.l2_loss(self.weights[weight_index])
        # Calculate MSE loss between predicted and expected output
        mse = tf.compat.v1.losses.mean_squared_error( target , pred )
        # Return MSE + regularization_parameter * regularizer_sum
        return tf.reduce_mean( mse + regularizer * regularization_parameter)
    # Updates the weights of the network using Adam Gradient Descent
    def train_step(self, inputs, outputs ):
        """One optimization step; returns (and stores) the step's loss value."""
        self.weights = list(self.weights)
        with tf.GradientTape() as tape:
            # Calculate loss
            current_loss = self.loss( self.predict( inputs ), outputs, 1)
        # compute gradient of the weights with the current loss
        grads = tape.gradient( target=current_loss , sources=self.weights )
        # Apply the gradients to the weights using the Adam optimizer
        self.optimizer.apply_gradients( zip( grads , self.weights ) )
        self.current_loss = current_loss.numpy()
        print('current loss: ', self.current_loss )
        return current_loss.numpy()
if __name__ == "__main__":
    # Smoke test: build a fresh model and run one forward pass on a sample image.
    model = Layers(num_classes= 10, learning_rate=0.01)
    # print(model.weights[0])
    image = Image.open('reference/test.png')
    np_image = np.asarray(image)
    # Add a leading batch dimension and scale pixel values to [0, 1].
    np_image = np_image[np.newaxis, :, :, :]
    np_image = np_image/255.0
    output = model.predict(np_image)
    # print(type(output))
    print(output.numpy())
    # Index of the highest-scoring class.
    print(np.argmax(output))
    # np.save('reference/test.npy', model.weights)
    # b = np.load('test.npy', allow_pickle=True)
    # print(len(b), b.shape)
    # print(b[0].shape)
# print(b[0].shape) | [
"os.mkdir",
"tensorflow.compat.v1.losses.mean_squared_error",
"tensorflow.compat.v1.keras.initializers.glorot_uniform",
"numpy.argmax",
"tensorflow.nn.max_pool2d",
"tensorflow.matmul",
"tensorflow.initializers.glorot_uniform",
"tensorflow.nn.conv2d",
"tensorflow.nn.leaky_relu",
"os.path.join",
"... | [((76, 113), 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {}), '()\n', (111, 113), True, 'import tensorflow as tf\n'), ((8840, 8872), 'PIL.Image.open', 'Image.open', (['"""reference/test.png"""'], {}), "('reference/test.png')\n", (8850, 8872), False, 'from PIL import Image\n'), ((8888, 8905), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (8898, 8905), True, 'import numpy as np\n'), ((258, 290), 'tensorflow.initializers.glorot_uniform', 'tf.initializers.glorot_uniform', ([], {}), '()\n', (288, 290), True, 'import tensorflow as tf\n'), ((384, 445), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.compat.v1.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (416, 445), True, 'import tensorflow as tf\n'), ((2978, 3050), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inputs', 'filters', '[1, strides, strides, 1]'], {'padding': 'padding'}), '(inputs, filters, [1, strides, strides, 1], padding=padding)\n', (2990, 3050), True, 'import tensorflow as tf\n'), ((3066, 3099), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['output'], {'alpha': '(1)'}), '(output, alpha=1)\n', (3082, 3099), True, 'import tensorflow as tf\n'), ((3213, 3309), 'tensorflow.nn.max_pool2d', 'tf.nn.max_pool2d', (['inputs'], {'ksize': 'pool_size', 'padding': 'padding', 'strides': '[1, strides, strides, 1]'}), '(inputs, ksize=pool_size, padding=padding, strides=[1,\n strides, strides, 1])\n', (3229, 3309), True, 'import tensorflow as tf\n'), ((3423, 3519), 'tensorflow.nn.avg_pool2d', 'tf.nn.avg_pool2d', (['inputs'], {'ksize': 'pool_size', 'padding': 'padding', 'strides': '[1, strides, strides, 1]'}), '(inputs, ksize=pool_size, padding=padding, strides=[1,\n strides, strides, 1])\n', (3439, 3519), True, 'import tensorflow as tf\n'), ((3931, 3959), 'tensorflow.cast', 'tf.cast', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (3938, 3959), True, 'import tensorflow as tf\n'), ((4284, 
4315), 'tensorflow.nn.relu', 'tf.nn.relu', (['max_pooling_initial'], {}), '(max_pooling_initial)\n', (4294, 4315), True, 'import tensorflow as tf\n'), ((4537, 4578), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['batch_1_conv2d_1'], {'rate': '(0.4)'}), '(batch_1_conv2d_1, rate=0.4)\n', (4550, 4578), True, 'import tensorflow as tf\n'), ((4822, 4880), 'tensorflow.concat', 'tf.concat', (['[max_pooling_initial, batch_1_conv2d_2]'], {'axis': '(3)'}), '([max_pooling_initial, batch_1_conv2d_2], axis=3)\n', (4831, 4880), True, 'import tensorflow as tf\n'), ((4941, 4960), 'tensorflow.nn.relu', 'tf.nn.relu', (['batch_2'], {}), '(batch_2)\n', (4951, 4960), True, 'import tensorflow as tf\n'), ((5134, 5175), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['batch_2_conv2d_1'], {'rate': '(0.4)'}), '(batch_2_conv2d_1, rate=0.4)\n', (5147, 5175), True, 'import tensorflow as tf\n'), ((5373, 5419), 'tensorflow.concat', 'tf.concat', (['[batch_2, batch_2_conv2d_2]'], {'axis': '(3)'}), '([batch_2, batch_2_conv2d_2], axis=3)\n', (5382, 5419), True, 'import tensorflow as tf\n'), ((5514, 5533), 'tensorflow.nn.relu', 'tf.nn.relu', (['batch_3'], {}), '(batch_3)\n', (5524, 5533), True, 'import tensorflow as tf\n'), ((5707, 5748), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['batch_3_conv2d_1'], {'rate': '(0.4)'}), '(batch_3_conv2d_1, rate=0.4)\n', (5720, 5748), True, 'import tensorflow as tf\n'), ((5946, 5992), 'tensorflow.concat', 'tf.concat', (['[batch_3, batch_3_conv2d_2]'], {'axis': '(3)'}), '([batch_3, batch_3_conv2d_2], axis=3)\n', (5955, 5992), True, 'import tensorflow as tf\n'), ((6045, 6064), 'tensorflow.nn.relu', 'tf.nn.relu', (['batch_4'], {}), '(batch_4)\n', (6055, 6064), True, 'import tensorflow as tf\n'), ((6238, 6279), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['batch_4_conv2d_1'], {'rate': '(0.4)'}), '(batch_4_conv2d_1, rate=0.4)\n', (6251, 6279), True, 'import tensorflow as tf\n'), ((6481, 6527), 'tensorflow.concat', 'tf.concat', (['[batch_4, batch_4_conv2d_2]'], {'axis': '(3)'}), 
'([batch_4, batch_4_conv2d_2], axis=3)\n', (6490, 6527), True, 'import tensorflow as tf\n'), ((6598, 6621), 'tensorflow.nn.relu', 'tf.nn.relu', (['final_batch'], {}), '(final_batch)\n', (6608, 6621), True, 'import tensorflow as tf\n'), ((7537, 7567), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.weights[0]'], {}), '(self.weights[0])\n', (7550, 7567), True, 'import tensorflow as tf\n'), ((7775, 7827), 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.compat.v1.losses.mean_squared_error', (['target', 'pred'], {}), '(target, pred)\n', (7813, 7827), True, 'import tensorflow as tf\n'), ((7912, 7972), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(mse + regularizer * regularization_parameter)'], {}), '(mse + regularizer * regularization_parameter)\n', (7926, 7972), True, 'import tensorflow as tf\n'), ((9080, 9097), 'numpy.argmax', 'np.argmax', (['output'], {}), '(output)\n', (9089, 9097), True, 'import numpy as np\n'), ((1087, 1135), 'tensorflow.compat.v1.keras.initializers.glorot_uniform', 'tf.compat.v1.keras.initializers.glorot_uniform', ([], {}), '()\n', (1133, 1135), True, 'import tensorflow as tf\n'), ((2145, 2160), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (2155, 2160), False, 'import os\n'), ((2174, 2193), 'os.mkdir', 'os.mkdir', (['"""weights"""'], {}), "('weights')\n", (2182, 2193), False, 'import os\n'), ((3627, 3653), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'weights'], {}), '(inputs, weights)\n', (3636, 3653), True, 'import tensorflow as tf\n'), ((3711, 3746), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x'], {'rate': 'dropout_rate'}), '(x, rate=dropout_rate)\n', (3724, 3746), True, 'import tensorflow as tf\n'), ((7652, 7693), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['self.weights[weight_index]'], {}), '(self.weights[weight_index])\n', (7665, 7693), True, 'import tensorflow as tf\n'), ((8143, 8160), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8158, 8160), True, 'import tensorflow as tf\n'), ((583, 620), 
'os.path.join', 'os.path.join', (['"""weights"""', 'weights_file'], {}), "('weights', weights_file)\n", (595, 620), False, 'import os\n'), ((2320, 2365), 'os.path.join', 'os.path.join', (['"""weights"""', 'self.save_model_name'], {}), "('weights', self.save_model_name)\n", (2332, 2365), False, 'import os\n'), ((2452, 2497), 'os.path.join', 'os.path.join', (['"""weights"""', 'self.save_model_name'], {}), "('weights', self.save_model_name)\n", (2464, 2497), False, 'import os\n'), ((6978, 7008), 'tensorflow.shape', 'tf.shape', (['downsampling_average'], {}), '(downsampling_average)\n', (6986, 7008), True, 'import tensorflow as tf\n')] |
import functools
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import arviz as az
import natsort
from autocorr import AutoCorrTime
def _subset_quantile(dataset: az.InferenceData, q) -> az.InferenceData:
    """Keep the top *q* fraction of draws, ranked by discriminator probability."""
    d_values = dataset.posterior["D"].values[0]
    order = np.argsort(d_values)[::-1]  # descending by D
    keep = order[: int(q * len(dataset.posterior.draw))]
    subset = {
        name: dataset.posterior[name].values[:, keep]
        for name in dataset.posterior.keys()
    }
    return az.convert_to_inference_data(subset)
def _subset_threshold(dataset: az.InferenceData, x: float) -> az.InferenceData:
    """Restrict the dataset to draws whose discriminator probability exceeds *x*."""
    keep = np.flatnonzero(dataset.posterior["D"].values[0] > x)
    subset = {
        name: dataset.posterior[name].values[:, keep]
        for name in dataset.posterior.keys()
    }
    return az.convert_to_inference_data(subset)
def _subset_slice(dataset: az.InferenceData, slice_: slice) -> az.InferenceData:
    """Select a slice of the draws within each chain."""
    sliced = {}
    for name in dataset.posterior.keys():
        sliced[name] = dataset.posterior[name].values[:, slice_]
    return az.convert_to_inference_data(sliced)
def _fig_from_arviz_axes(axes):
if hasattr(axes, "flat"):
# axes is an ndarray of matplotlib.axes.Axes
fig = axes.flat[0].figure
else:
# axes is a matplotlib.axes.Axes
fig = axes.figure
return fig
def iter_chains(dataset: az.InferenceData):
    """Yield each MCMC chain of *dataset* as its own InferenceData object."""
    for chain_idx in range(len(dataset.posterior.chain)):
        single_chain = {
            name: dataset.posterior[name].values[chain_idx]
            for name in dataset.posterior.keys()
        }
        yield az.convert_to_inference_data(single_chain)
def plot_pair(dataset, aspect=9 / 16, scale=1):
    """Pairwise scatter/KDE plot of all posterior variables except "D".

    Returns the matplotlib figure.
    """
    var_names = [k for k in dataset.posterior.data_vars.keys() if k != "D"]
    fig, ax = plt.subplots(
        len(var_names),
        len(var_names),
        figsize=scale * plt.figaspect(aspect),
        tight_layout=True,
        squeeze=False,
    )
    az.plot_pair(
        dataset,
        var_names=var_names,
        ax=ax,
        kind=["scatter", "kde"],
        marginals=True,
        scatter_kwargs=dict(rasterized=True),
    )
    # Red cross at (10000, 200) — presumably the true parameter values
    # (matches the marker in plot_abc_D); TODO confirm.
    ax[1, 0].scatter(10000, 200, c="red", marker="x", zorder=5)
    return fig
def plot_abc_quantile(dataset):
    """Pair plot of the 0.1% of draws with the largest discriminator output."""
    top_draws = _subset_quantile(dataset, 0.001)
    figure = plot_pair(top_draws)
    figure.suptitle("0.1% largest D(x)")
    return figure
def plot_abc_threshold(dataset):
    """Pair plot of the draws whose discriminator output exceeds 0.99."""
    accepted = _subset_threshold(dataset, 0.99)
    figure = plot_pair(accepted)
    figure.suptitle("D(x) > 0.99")
    return figure
def plot_abc_D(dataset, aspect=9 / 16, scale=1):
    """Scatter N0 vs N1 coloured by discriminator probability D(x).

    Returns the matplotlib figure.
    """
    fig, ax = plt.subplots(
        figsize=scale * plt.figaspect(aspect),
        tight_layout=True,
    )
    # Flatten draws across chains for each variable.
    N0 = np.concatenate(dataset.posterior["N0"])
    N1 = np.concatenate(dataset.posterior["N1"])
    D = np.concatenate(dataset.posterior["D"])
    sc = ax.scatter(N0, N1, c=D, vmin=0, vmax=1, rasterized=True, s=2)
    fig.colorbar(sc)
    ax.set_title("D(x)")
    ax.set_xlabel("N0")
    ax.set_ylabel("N1")
    # Red cross — presumably the true (N0, N1) values; TODO confirm.
    ax.scatter(10000, 200, c="red", marker="x")
    return fig
def plot_mcmc_trace(dataset, kind, aspect=9 / 16, scale=1):
    """arviz trace plot of every variable except "D".

    kind: passed through to az.plot_trace (e.g. "trace" or "rank_bars").
    Returns the matplotlib figure.
    """
    figsize = scale * plt.figaspect(aspect)
    axes = az.plot_trace(
        dataset,
        # "[^D]" is a regex excluding variable "D" (the parentheses do not
        # make this a tuple — it is a single string).
        var_names=("[^D]"),
        filter_vars="regex",
        figsize=figsize,
        kind=kind,
        combined=False,
        compact=False,
        trace_kwargs=dict(rasterized=True),
    )
    fig = _fig_from_arviz_axes(axes)
    # arviz enables constrained layout; switch back to tight layout.
    fig.set_constrained_layout(False)
    fig.set_tight_layout(True)
    return fig
def plot_mcmc_autocorr(dataset, aspect=9 / 16, scale=1):
    """arviz autocorrelation plot of every variable except "D".

    Returns the matplotlib figure.
    """
    figsize = scale * plt.figaspect(aspect)
    axes = az.plot_autocorr(
        dataset,
        figsize=figsize,
        combined=False,
        # Regex excluding variable "D" (a single string, not a tuple).
        var_names=("[^D]"),
        filter_vars="regex",
    )
    fig = _fig_from_arviz_axes(axes)
    fig.set_constrained_layout(False)
    fig.set_tight_layout(True)
    return fig
def plot_mcmc_ess(dataset, kind, aspect=9 / 16, scale=1):
    """arviz effective-sample-size plot of every variable except "D".

    kind: passed through to az.plot_ess (e.g. "quantile" or "evolution").
    Returns the matplotlib figure.
    """
    figsize = scale * plt.figaspect(aspect)
    axes = az.plot_ess(
        dataset,
        kind=kind,
        figsize=figsize,
        # Regex excluding variable "D" (a single string, not a tuple).
        var_names=("[^D]"),
        filter_vars="regex",
    )
    fig = _fig_from_arviz_axes(axes)
    fig.set_constrained_layout(False)
    fig.set_tight_layout(True)
    return fig
def plot_mcmc_iat(dataset, aspect=9 / 16, scale=1):
    """Plot integrated autocorrelation time (IAT) estimates vs. chain length.

    For each model variable (everything except the discriminator output
    "D"), the IAT is estimated with three methods ("dfm", "gw", "mk") on
    growing prefixes of the chains, together with the tau = N/50 guideline.
    Prints the final "dfm" estimate and returns the matplotlib figure.
    """
    # Fix: the original computed var_names twice; compute it once.
    var_names = [k for k in dataset.posterior.data_vars.keys() if k != "D"]
    samples = np.array([dataset.posterior[k] for k in var_names])
    # Rearrange (nvars, nwalkers, nsteps) -> (nsteps, nwalkers, nvars),
    # the layout AutoCorrTime is called with throughout this file.
    samples = samples.swapaxes(0, 2)
    nsteps, nwalkers, nvars = samples.shape
    # assert nsteps >= 100
    fig, axes = plt.subplots(
        nrows=1,
        ncols=len(var_names),
        figsize=scale * plt.figaspect(aspect),
        tight_layout=True,
        squeeze=False,
    )
    # Estimate the IAT on prefixes of length 100, 200, ... of the chains.
    N = np.arange(100, nsteps, 100, dtype=int)
    dfm = np.empty((len(N), len(var_names)))
    gw = np.empty((len(N), len(var_names)))
    mk = np.empty((len(N), len(var_names)))
    for k, n in enumerate(N):
        dfm[k] = AutoCorrTime(samples[:n], method="dfm")
        gw[k] = AutoCorrTime(samples[:n], method="gw")
        mk[k] = AutoCorrTime(samples[:n], method="mk")
    for j, var_name in enumerate(var_names):
        ax = axes.flat[j]
        ax.plot(N, dfm[:, j], label="dfm")
        ax.plot(N, gw[:, j], label="gw")
        ax.plot(N, mk[:, j], label="mk")
        ax.plot(N, N / 50.0, "--k", label=r"$\tau = N/50$")
        ax.legend()
        ax.set_title(f"Integrated autocorrelation time ({var_name})")
        ax.set_xlabel("N")
        ax.set_ylabel(r"IAT")
    # Largest "dfm" estimate at the full chain length.
    dfm_max = np.max(dfm[-1])
    print("IAT", dfm_max)
    return fig
def iat_max(dataset):
    """Return the largest integrated autocorrelation time over all variables.

    Uses the "dfm" estimator; the result is rounded up to a whole number.
    """
    keep = [name for name in dataset.posterior.data_vars.keys() if name != "D"]
    chains = np.array([dataset.posterior[name] for name in keep]).swapaxes(0, 2)
    taus = AutoCorrTime(chains, method="dfm")
    return int(np.ceil(np.max(taus)))
def ecdf(x):
    """Return (sorted sample values, empirical CDF heights) for *x*."""
    values = np.sort(x)
    n = len(values)
    heights = np.arange(1, n + 1) / float(n)
    return values, heights
def plot_gan_ecdf(datasets, aspect=9 / 16, scale=1):
    """Overlay per-iteration empirical CDFs of each posterior variable.

    datasets: one InferenceData per GAN iteration; each iteration's ECDF
    is drawn coloured by iteration index, with the pooled ECDF in black.
    Returns the matplotlib figure.
    """
    var_names = [k for k in datasets[0].posterior.data_vars.keys()]
    # One column per variable plus a narrow final column for the colorbar.
    fig, axes = plt.subplots(
        nrows=1,
        ncols=len(var_names) + 1,
        gridspec_kw=dict(width_ratios=[1] * len(var_names) + [0.02 * len(var_names)]),
        figsize=scale * plt.figaspect(aspect),
        tight_layout=True,
        squeeze=False,
    )
    cmap = matplotlib.cm.get_cmap("coolwarm")
    # First iteration to draw (0 = all of them).
    kx = 0  # len(datasets) // 2
    norm = matplotlib.colors.Normalize(vmin=kx, vmax=len(datasets) - 1)
    posteriors = {var: list() for var in var_names}
    for j, dataset in enumerate(datasets[kx:], kx):
        for var, ax in zip(var_names, axes.flat):
            posterior = dataset.posterior[var].values.reshape(-1)
            posteriors[var].append(posterior)
            xs, ys = ecdf(posterior)
            ax.plot(xs, ys, alpha=0.5, c=cmap(norm(j)))
    # Pooled ECDF over all iterations, drawn in black on top.
    for var, ax in zip(var_names, axes.flat):
        posterior = np.array(posteriors[var]).reshape(-1)
        xs, ys = ecdf(posterior)
        ax.plot(xs, ys, "k")
        ax.set_title(var)
        ax.set_xlabel(var)
        ax.set_ylabel(r"Pr($x<X$)")
    fig.colorbar(
        matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap),
        cax=axes.flat[-1],
        label="iteration",
    )
    return fig
def partial(f, *args, **kwargs):
    """Like functools.partial, but copying *f*'s metadata onto the wrapper.

    Keeps ``__name__`` / ``__doc__`` usable on the partially-applied object.
    """
    wrapped = functools.partial(f, *args, **kwargs)
    functools.update_wrapper(wrapped, f)
    return wrapped
def load_ncf_multi(filenames):
    """Load several netCDF files as InferenceData, in natural-sort order."""
    return [az.from_netcdf(path) for path in natsort.natsorted(filenames)]
if __name__ == "__main__":
    import sys
    # Usage: <prog> {abc|mcmc|gan} report.pdf data.ncf [... dataN.ncf]
    if len(sys.argv) < 4:
        print(
            f"usage: {sys.argv[0]} {{abc|mcmc|gan}} report.pdf data.ncf [... dataN.ncf]"
        )
        exit(1)
    subcommand = sys.argv[1]
    if subcommand not in ("abc", "mcmc", "gan"):
        raise RuntimeError(f"Not a valid subcommand: '{subcommand}'")
    output_filename = sys.argv[2]
    # "gan" accepts many input files (one per iteration); the others one.
    if subcommand == "gan":
        input_filenames = sys.argv[3:]
        dataset = load_ncf_multi(input_filenames)
    else:
        input_filename = sys.argv[3]
        dataset = az.from_netcdf(input_filename)
    # Render every selected plot into one multi-page PDF report.
    with PdfPages(output_filename) as pdf:
        if subcommand == "abc":
            funcs = [plot_abc_D, plot_abc_threshold, plot_abc_quantile]
        elif subcommand == "mcmc":
            funcs = [
                # partial(plot_mcmc_ess, kind="quantile"),
                # partial(plot_mcmc_ess, kind="evolution"),
                plot_mcmc_iat,
                plot_mcmc_autocorr,
                plot_pair,
                partial(plot_mcmc_trace, kind="trace"),
                # partial(plot_mcmc_trace, kind="rank_bars"),
            ]
        elif subcommand == "gan":
            funcs = [
                plot_gan_ecdf,
            ]
            pass
        for func in funcs:
            # print("pre", func.__name__)
            fig = func(dataset)
            # print("post", func.__name__)
            pdf.savefig(figure=fig, dpi=200)
            # Close each figure after saving to free memory.
            plt.close(fig)
        # For MCMC, additionally emit a per-chain trace plot page.
        if subcommand == "mcmc":
            for chaindataset in iter_chains(dataset):
                fig = plot_mcmc_trace(chaindataset, kind="trace")
                pdf.savefig(figure=fig, dpi=200)
                plt.close(fig)
| [
"matplotlib.pyplot.figaspect",
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.cm.get_cmap",
"numpy.argsort",
"numpy.arange",
"arviz.plot_autocorr",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.close",
"arviz.from_netcdf",
"numpy.max",
"autocorr.AutoCorrTime",
"functools.partial"... | [((592, 630), 'arviz.convert_to_inference_data', 'az.convert_to_inference_data', (['datadict'], {}), '(datadict)\n', (620, 630), True, 'import arviz as az\n'), ((979, 1017), 'arviz.convert_to_inference_data', 'az.convert_to_inference_data', (['datadict'], {}), '(datadict)\n', (1007, 1017), True, 'import arviz as az\n'), ((1278, 1316), 'arviz.convert_to_inference_data', 'az.convert_to_inference_data', (['datadict'], {}), '(datadict)\n', (1306, 1316), True, 'import arviz as az\n'), ((2888, 2927), 'numpy.concatenate', 'np.concatenate', (["dataset.posterior['N0']"], {}), "(dataset.posterior['N0'])\n", (2902, 2927), True, 'import numpy as np\n'), ((2937, 2976), 'numpy.concatenate', 'np.concatenate', (["dataset.posterior['N1']"], {}), "(dataset.posterior['N1'])\n", (2951, 2976), True, 'import numpy as np\n'), ((2985, 3023), 'numpy.concatenate', 'np.concatenate', (["dataset.posterior['D']"], {}), "(dataset.posterior['D'])\n", (2999, 3023), True, 'import numpy as np\n'), ((3836, 3937), 'arviz.plot_autocorr', 'az.plot_autocorr', (['dataset'], {'figsize': 'figsize', 'combined': '(False)', 'var_names': '"""[^D]"""', 'filter_vars': '"""regex"""'}), "(dataset, figsize=figsize, combined=False, var_names='[^D]',\n filter_vars='regex')\n", (3852, 3937), True, 'import arviz as az\n'), ((4219, 4310), 'arviz.plot_ess', 'az.plot_ess', (['dataset'], {'kind': 'kind', 'figsize': 'figsize', 'var_names': '"""[^D]"""', 'filter_vars': '"""regex"""'}), "(dataset, kind=kind, figsize=figsize, var_names='[^D]',\n filter_vars='regex')\n", (4230, 4310), True, 'import arviz as az\n'), ((4621, 4672), 'numpy.array', 'np.array', (['[dataset.posterior[k] for k in var_names]'], {}), '([dataset.posterior[k] for k in var_names])\n', (4629, 4672), True, 'import numpy as np\n'), ((5047, 5085), 'numpy.arange', 'np.arange', (['(100)', 'nsteps', '(100)'], {'dtype': 'int'}), '(100, nsteps, 100, dtype=int)\n', (5056, 5085), True, 'import numpy as np\n'), ((5835, 5850), 'numpy.max', 
'np.max', (['dfm[-1]'], {}), '(dfm[-1])\n', (5841, 5850), True, 'import numpy as np\n'), ((6084, 6135), 'numpy.array', 'np.array', (['[dataset.posterior[k] for k in var_names]'], {}), '([dataset.posterior[k] for k in var_names])\n', (6092, 6135), True, 'import numpy as np\n'), ((6229, 6264), 'autocorr.AutoCorrTime', 'AutoCorrTime', (['samples'], {'method': '"""dfm"""'}), "(samples, method='dfm')\n", (6241, 6264), False, 'from autocorr import AutoCorrTime\n'), ((6303, 6314), 'numpy.max', 'np.max', (['dfm'], {}), '(dfm)\n', (6309, 6314), True, 'import numpy as np\n'), ((6372, 6382), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (6379, 6382), True, 'import numpy as np\n'), ((6858, 6892), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""coolwarm"""'], {}), "('coolwarm')\n", (6880, 6892), False, 'import matplotlib\n'), ((8009, 8037), 'natsort.natsorted', 'natsort.natsorted', (['filenames'], {}), '(filenames)\n', (8026, 8037), False, 'import natsort\n'), ((374, 418), 'numpy.argsort', 'np.argsort', (["dataset.posterior['D'].values[0]"], {}), "(dataset.posterior['D'].values[0])\n", (384, 418), True, 'import numpy as np\n'), ((3338, 3359), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['aspect'], {}), '(aspect)\n', (3351, 3359), True, 'import matplotlib.pyplot as plt\n'), ((3803, 3824), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['aspect'], {}), '(aspect)\n', (3816, 3824), True, 'import matplotlib.pyplot as plt\n'), ((4186, 4207), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['aspect'], {}), '(aspect)\n', (4199, 4207), True, 'import matplotlib.pyplot as plt\n'), ((5266, 5305), 'autocorr.AutoCorrTime', 'AutoCorrTime', (['samples[:n]'], {'method': '"""dfm"""'}), "(samples[:n], method='dfm')\n", (5278, 5305), False, 'from autocorr import AutoCorrTime\n'), ((5322, 5360), 'autocorr.AutoCorrTime', 'AutoCorrTime', (['samples[:n]'], {'method': '"""gw"""'}), "(samples[:n], method='gw')\n", (5334, 5360), False, 'from autocorr import AutoCorrTime\n'), ((5377, 
5415), 'autocorr.AutoCorrTime', 'AutoCorrTime', (['samples[:n]'], {'method': '"""mk"""'}), "(samples[:n], method='mk')\n", (5389, 5415), False, 'from autocorr import AutoCorrTime\n'), ((6330, 6346), 'numpy.ceil', 'np.ceil', (['dfm_max'], {}), '(dfm_max)\n', (6337, 6346), True, 'import numpy as np\n'), ((7642, 7692), 'matplotlib.cm.ScalarMappable', 'matplotlib.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (7670, 7692), False, 'import matplotlib\n'), ((7918, 7955), 'functools.partial', 'functools.partial', (['f', '*args'], {}), '(f, *args, **kwargs)\n', (7935, 7955), False, 'import functools\n'), ((8050, 8074), 'arviz.from_netcdf', 'az.from_netcdf', (['filename'], {}), '(filename)\n', (8064, 8074), True, 'import arviz as az\n'), ((8669, 8699), 'arviz.from_netcdf', 'az.from_netcdf', (['input_filename'], {}), '(input_filename)\n', (8683, 8699), True, 'import arviz as az\n'), ((8710, 8735), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['output_filename'], {}), '(output_filename)\n', (8718, 8735), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((1787, 1825), 'arviz.convert_to_inference_data', 'az.convert_to_inference_data', (['datadict'], {}), '(datadict)\n', (1815, 1825), True, 'import arviz as az\n'), ((9570, 9584), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9579, 9584), True, 'import matplotlib.pyplot as plt\n'), ((2052, 2073), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['aspect'], {}), '(aspect)\n', (2065, 2073), True, 'import matplotlib.pyplot as plt\n'), ((2823, 2844), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['aspect'], {}), '(aspect)\n', (2836, 2844), True, 'import matplotlib.pyplot as plt\n'), ((4959, 4980), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['aspect'], {}), '(aspect)\n', (4972, 4980), True, 'import matplotlib.pyplot as plt\n'), ((6768, 6789), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['aspect'], {}), '(aspect)\n', (6781, 6789), 
True, 'import matplotlib.pyplot as plt\n'), ((7425, 7450), 'numpy.array', 'np.array', (['posteriors[var]'], {}), '(posteriors[var])\n', (7433, 7450), True, 'import numpy as np\n'), ((9804, 9818), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9813, 9818), True, 'import matplotlib.pyplot as plt\n')] |
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .base_representation import BaseRepresentation
from .classification_representation import (
ClassificationAnnotation, SequenceClassificationAnnotation, MultiLabelClassificationAnnotation
)
class MachineTranslationRepresentation(BaseRepresentation):
    """Common base for machine-translation annotations and predictions."""
class MachineTranslationAnnotation(MachineTranslationRepresentation):
    """Ground-truth sample for machine translation.

    Stores the source sentence and its human reference translation.
    """

    def __init__(self, identifier, source='', reference=''):
        super().__init__(identifier)
        self.reference = reference
        self.source = source
class MachineTranslationPrediction(MachineTranslationRepresentation):
    """Model output for machine translation: the produced translation text."""

    def __init__(self, identifier, translation=''):
        super().__init__(identifier)
        self.translation = translation
class LanguageModeling(BaseRepresentation):
    """Common base for language-modeling annotations and predictions."""

    def __init__(self, identifier=''):
        super().__init__(identifier)
class LanguageModelingAnnotation(LanguageModeling):
    """Ground-truth sample for language modeling.

    Keeps the tokenized input (ids and token strings) together with the
    reference labels (empty list when none are supplied).
    """

    def __init__(self, identifier, unique_id, input_ids, tokens, labels=None):
        super().__init__(identifier)
        self.unique_id = unique_id
        self.input_ids = input_ids
        self.tokens = tokens
        self.labels = [] if labels is None else labels
class LanguageModelingPrediction(LanguageModeling):
    """Model output for language modeling: raw next-token logits."""

    def __init__(self, identifier, logits):
        super().__init__(identifier)
        self.logits = logits
class QuestionAnswering(BaseRepresentation):
    """Common base for question-answering annotations and predictions."""

    def __init__(self, identifier=''):
        super().__init__(identifier)
class QuestionAnsweringAnnotation(QuestionAnswering):
    """Ground-truth sample for extractive question answering.

    Carries the encoded network inputs for one feature span together with
    the reference answer text and the bookkeeping needed to map a predicted
    span back onto the original paragraph.
    """

    def __init__(self, identifier, question_id, unique_id,
                 input_ids, input_mask, segment_ids, position_ids,
                 cls_index, p_mask,
                 orig_answer_text=None, paragraph_text=None, doc_tokens=None,
                 is_impossible=False, paragraph_len=None,
                 tokens=None, token_is_max_context=None, token_to_orig_map=None):
        super().__init__(identifier)
        # Identification of the question / feature pair.
        self.question_id = question_id
        self.unique_id = unique_id
        # Encoded network inputs.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.position_ids = position_ids
        self.cls_index = cls_index
        self.p_mask = p_mask
        # Reference answer and source-paragraph bookkeeping.
        self.orig_answer_text = '' if orig_answer_text is None else orig_answer_text
        self.paragraph_text = '' if paragraph_text is None else paragraph_text
        self.doc_tokens = [] if doc_tokens is None else doc_tokens
        self.is_impossible = is_impossible
        self.paragraph_len = paragraph_len
        self.tokens = tokens
        self.token_is_max_context = token_is_max_context
        self.token_to_orig_map = token_to_orig_map
class QuestionAnsweringPrediction(QuestionAnswering):
    """Model output for extractive QA: answer-span logits, indices and tokens.

    Every optional argument defaults to an empty list when not supplied.
    """

    def __init__(self, identifier, start_logits=None, end_logits=None, start_index=None, end_index=None, tokens=None):
        super().__init__(identifier)
        self.start_logits = [] if start_logits is None else start_logits
        self.end_logits = [] if end_logits is None else end_logits
        self.start_index = [] if start_index is None else start_index
        self.end_index = [] if end_index is None else end_index
        self.tokens = [] if tokens is None else tokens
class QuestionAnsweringEmbeddingAnnotation(QuestionAnswering):
    """Annotation for embedding-based question answering.

    Stores the encoded network inputs for one sample plus an identifier that
    marks the matching context position.
    """

    def __init__(self, identifier, input_ids, input_mask, segment_ids, position_ids, context_pos_identifier):
        super().__init__(identifier)
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.position_ids = position_ids
        # Correctly spelled attribute for new code.
        self.context_pos_identifier = context_pos_identifier
        # Historical misspelling kept so existing consumers that read
        # `context_pos_indetifier` keep working (backward compatibility).
        self.context_pos_indetifier = context_pos_identifier
class QuestionAnsweringEmbeddingPrediction(QuestionAnswering):
    """Model output for embedding-based QA: a single embedding vector."""

    def __init__(self, identifier, embedding):
        super().__init__(identifier)
        self.embedding = embedding
class QuestionAnsweringBiDAFAnnotation(QuestionAnswering):
    """Annotation for BiDAF-style QA with word- and character-level inputs."""

    def __init__(self, identifier, title, context, query, answers, context_word, context_char, query_word, query_char,
                 question_id, words_idx_in_context):
        super().__init__(identifier)
        self.question_id = question_id
        self.title = title
        self.context = context
        self.query = query
        self.orig_answer_text = answers
        # Word- and character-level encodings of context and query.
        self.context_word = context_word
        self.context_char = context_char
        self.query_word = query_word
        self.query_char = query_char
        self.words_idx_in_context = words_idx_in_context
class TextClassificationAnnotation(ClassificationAnnotation):
    """Ground truth for single-label text classification with encoded inputs."""

    def __init__(self, identifier, label, input_ids, input_mask=None, segment_ids=None, tokens=None):
        super().__init__(identifier, label)
        self.input_ids = input_ids
        # Optional encodings default to empty lists.
        self.input_mask = [] if input_mask is None else input_mask
        self.segment_ids = [] if segment_ids is None else segment_ids
        self.tokens = [] if tokens is None else tokens
class BERTNamedEntityRecognitionAnnotation(SequenceClassificationAnnotation):
    """Ground truth for BERT-based NER: token inputs and per-token label ids."""

    def __init__(self, identifier, input_ids, input_mask, segment_ids, label_id, valid_ids=None, label_mask=None):
        super().__init__(identifier, label_id)
        self.input_ids = input_ids
        self.input_mask = [] if input_mask is None else input_mask
        self.segment_ids = [] if segment_ids is None else segment_ids
        # When given, masks are converted to boolean numpy arrays;
        # None is passed through unchanged.
        self.valid_ids = valid_ids if valid_ids is None else np.array(valid_ids, dtype=bool)
        self.label_mask = label_mask if label_mask is None else np.array(label_mask, dtype=bool)
class SentenceSimilarityAnnotation(BaseRepresentation):
    """Ground truth for sentence-similarity regression over a sentence pair."""

    def __init__(
            self,
            identifier, idx, pair_id, similarity_score, input_ids, input_mask, segment_ids
    ):
        super().__init__(identifier)
        self.id = idx
        self.pair_id = pair_id
        self.similarity_score = similarity_score
        self.input_ids = input_ids
        # Optional encodings default to empty lists.
        self.input_mask = [] if input_mask is None else input_mask
        self.segment_ids = [] if segment_ids is None else segment_ids
class MultiLabelTextClassification(MultiLabelClassificationAnnotation):
    """Ground truth for multi-label text classification with encoded inputs."""

    def __init__(self, identifier, label, input_ids, input_mask=None, segment_ids=None, tokens=None):
        super().__init__(identifier, label)
        self.input_ids = input_ids
        # Optional encodings default to empty lists.
        self.input_mask = [] if input_mask is None else input_mask
        self.segment_ids = [] if segment_ids is None else segment_ids
        self.tokens = [] if tokens is None else tokens
| [
"numpy.array"
] | [((5980, 6011), 'numpy.array', 'np.array', (['valid_ids'], {'dtype': 'bool'}), '(valid_ids, dtype=bool)\n', (5988, 6011), True, 'import numpy as np\n'), ((6078, 6110), 'numpy.array', 'np.array', (['label_mask'], {'dtype': 'bool'}), '(label_mask, dtype=bool)\n', (6086, 6110), True, 'import numpy as np\n')] |
from IPython.core.error import UsageError
from mock import MagicMock
import numpy as np
from nose.tools import assert_equals, assert_is
import pandas as pd
from pandas.util.testing import assert_frame_equal
from sparkmagic.livyclientlib.exceptions import BadUserDataException
from sparkmagic.utils.utils import parse_argstring_or_throw, records_to_dataframe
from sparkmagic.utils.constants import SESSION_KIND_PYSPARK
def test_parse_argstring_or_throw():
    """UsageError must be wrapped into BadUserDataException; other errors propagate as-is."""
    failing_parser = MagicMock(side_effect=UsageError('OOGABOOGABOOGA'))
    try:
        parse_argstring_or_throw(MagicMock(), MagicMock(), parse_argstring=failing_parser)
        assert False
    except BadUserDataException as err:
        assert_equals(str(err), str(failing_parser.side_effect))

    failing_parser = MagicMock(side_effect=ValueError('AN UNKNOWN ERROR HAPPENED'))
    try:
        parse_argstring_or_throw(MagicMock(), MagicMock(), parse_argstring=failing_parser)
        assert False
    except ValueError as err:
        assert_is(err, failing_parser.side_effect)
def test_records_to_dataframe_missing_value_first():
    """A key absent from the first record still becomes a (None-filled) column."""
    records = """{"z":100, "y":50}
    {"z":25, "nullv":1.0, "y":10}"""
    actual = records_to_dataframe(records, SESSION_KIND_PYSPARK, True)
    expected = pd.DataFrame(
        [{'z': 100, "nullv": None, 'y': 50}, {'z': 25, "nullv": 1, 'y': 10}],
        columns=['z', "nullv", 'y'])
    assert_frame_equal(expected, actual)
def test_records_to_dataframe_coercing():
    """With coercion on, numeric and date strings are converted to typed values."""
    records = """{"z":"100", "y":"2016-01-01"}
    {"z":"25", "y":"2016-01-01"}"""
    actual = records_to_dataframe(records, SESSION_KIND_PYSPARK, True)
    jan_first = np.datetime64("2016-01-01")
    expected = pd.DataFrame(
        [{'z': 100, 'y': jan_first}, {'z': 25, 'y': jan_first}],
        columns=['z', 'y'])
    assert_frame_equal(expected, actual)
def test_records_to_dataframe_no_coercing():
    """With coercion off, values stay as the raw strings from the records."""
    records = """{"z":"100", "y":"2016-01-01"}
    {"z":"25", "y":"2016-01-01"}"""
    actual = records_to_dataframe(records, SESSION_KIND_PYSPARK, False)
    expected = pd.DataFrame(
        [{'z': "100", 'y': "2016-01-01"}, {'z': "25", 'y': "2016-01-01"}],
        columns=['z', 'y'])
    assert_frame_equal(expected, actual)
def test_records_to_dataframe_missing_value_later():
    """A key absent from a later record is filled with None for that row."""
    records = """{"z":25, "nullv":1.0, "y":10}
    {"z":100, "y":50}"""
    actual = records_to_dataframe(records, SESSION_KIND_PYSPARK, True)
    expected = pd.DataFrame(
        [{'z': 25, "nullv": 1, 'y': 10}, {'z': 100, "nullv": None, 'y': 50}],
        columns=['z', "nullv", 'y'])
    assert_frame_equal(expected, actual)
| [
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"numpy.datetime64",
"sparkmagic.utils.utils.records_to_dataframe",
"nose.tools.assert_is",
"mock.MagicMock",
"IPython.core.error.UsageError"
] | [((1178, 1234), 'sparkmagic.utils.utils.records_to_dataframe', 'records_to_dataframe', (['result', 'SESSION_KIND_PYSPARK', '(True)'], {}), '(result, SESSION_KIND_PYSPARK, True)\n', (1198, 1234), False, 'from sparkmagic.utils.utils import parse_argstring_or_throw, records_to_dataframe\n'), ((1250, 1365), 'pandas.DataFrame', 'pd.DataFrame', (["[{'z': 100, 'nullv': None, 'y': 50}, {'z': 25, 'nullv': 1, 'y': 10}]"], {'columns': "['z', 'nullv', 'y']"}), "([{'z': 100, 'nullv': None, 'y': 50}, {'z': 25, 'nullv': 1, 'y':\n 10}], columns=['z', 'nullv', 'y'])\n", (1262, 1365), True, 'import pandas as pd\n'), ((1363, 1395), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expected', 'df'], {}), '(expected, df)\n', (1381, 1395), False, 'from pandas.util.testing import assert_frame_equal\n'), ((1532, 1588), 'sparkmagic.utils.utils.records_to_dataframe', 'records_to_dataframe', (['result', 'SESSION_KIND_PYSPARK', '(True)'], {}), '(result, SESSION_KIND_PYSPARK, True)\n', (1552, 1588), False, 'from sparkmagic.utils.utils import parse_argstring_or_throw, records_to_dataframe\n'), ((1732, 1764), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expected', 'df'], {}), '(expected, df)\n', (1750, 1764), False, 'from pandas.util.testing import assert_frame_equal\n'), ((1904, 1961), 'sparkmagic.utils.utils.records_to_dataframe', 'records_to_dataframe', (['result', 'SESSION_KIND_PYSPARK', '(False)'], {}), '(result, SESSION_KIND_PYSPARK, False)\n', (1924, 1961), False, 'from sparkmagic.utils.utils import parse_argstring_or_throw, records_to_dataframe\n'), ((1977, 2080), 'pandas.DataFrame', 'pd.DataFrame', (["[{'z': '100', 'y': '2016-01-01'}, {'z': '25', 'y': '2016-01-01'}]"], {'columns': "['z', 'y']"}), "([{'z': '100', 'y': '2016-01-01'}, {'z': '25', 'y':\n '2016-01-01'}], columns=['z', 'y'])\n", (1989, 2080), True, 'import pandas as pd\n'), ((2079, 2111), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expected', 'df'], {}), 
'(expected, df)\n', (2097, 2111), False, 'from pandas.util.testing import assert_frame_equal\n'), ((2248, 2304), 'sparkmagic.utils.utils.records_to_dataframe', 'records_to_dataframe', (['result', 'SESSION_KIND_PYSPARK', '(True)'], {}), '(result, SESSION_KIND_PYSPARK, True)\n', (2268, 2304), False, 'from sparkmagic.utils.utils import parse_argstring_or_throw, records_to_dataframe\n'), ((2320, 2435), 'pandas.DataFrame', 'pd.DataFrame', (["[{'z': 25, 'nullv': 1, 'y': 10}, {'z': 100, 'nullv': None, 'y': 50}]"], {'columns': "['z', 'nullv', 'y']"}), "([{'z': 25, 'nullv': 1, 'y': 10}, {'z': 100, 'nullv': None, 'y':\n 50}], columns=['z', 'nullv', 'y'])\n", (2332, 2435), True, 'import pandas as pd\n'), ((2433, 2465), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['expected', 'df'], {}), '(expected, df)\n', (2451, 2465), False, 'from pandas.util.testing import assert_frame_equal\n'), ((502, 530), 'IPython.core.error.UsageError', 'UsageError', (['"""OOGABOOGABOOGA"""'], {}), "('OOGABOOGABOOGA')\n", (512, 530), False, 'from IPython.core.error import UsageError\n'), ((574, 585), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (583, 585), False, 'from mock import MagicMock\n'), ((587, 598), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (596, 598), False, 'from mock import MagicMock\n'), ((884, 895), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (893, 895), False, 'from mock import MagicMock\n'), ((897, 908), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (906, 908), False, 'from mock import MagicMock\n'), ((1000, 1041), 'nose.tools.assert_is', 'assert_is', (['e', 'parse_argstring.side_effect'], {}), '(e, parse_argstring.side_effect)\n', (1009, 1041), False, 'from nose.tools import assert_equals, assert_is\n'), ((1634, 1661), 'numpy.datetime64', 'np.datetime64', (['"""2016-01-01"""'], {}), "('2016-01-01')\n", (1647, 1661), True, 'import numpy as np\n'), ((1677, 1704), 'numpy.datetime64', 'np.datetime64', (['"""2016-01-01"""'], {}), "('2016-01-01')\n", 
(1690, 1704), True, 'import numpy as np\n')] |
""" Unit test for the Scipy GMRES linear solver. """
import unittest
import numpy as np
from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, \
DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError
from openmdao.test.converge_diverge import ConvergeDiverge, SingleDiamond, \
ConvergeDivergeGroups, SingleDiamondGrouped
from openmdao.test.sellar import SellarDerivativesGrouped
from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, \
FanOutGrouped, DoubleArrayComp, \
FanInGrouped, ArrayComp2D, FanOutAllGrouped
from openmdao.test.util import assert_rel_error
from openmdao.util.options import OptionsDictionary
class TestScipyGMRES(unittest.TestCase):
    """Exercise the ScipyGMRES linear solver on a suite of small analytic models.

    Each test builds an OpenMDAO model, solves it, then computes gradients in
    forward ('fwd'), reverse ('rev'), and sometimes finite-difference ('fd')
    modes and checks them against known analytic values with assert_rel_error.
    """
    def test_simple_matvec(self):
        """Gradient of y = 2*x through a matrix-free (apply_linear) component."""
        group = Group()
        group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
        group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])
        prob = Problem()
        prob.root = group
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
        J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
    def test_simple_matvec_subbed(self):
        """Same matrix-free gradient with the component nested in a sub-Group."""
        group = Group()
        group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])
        prob = Problem()
        prob.root = Group()
        prob.root.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
        prob.root.add('sub', group, promotes=['*'])
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
        J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
        J = prob.calc_gradient(['x'], ['y'], mode='fd', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
    def test_simple_matvec_subbed_like_multipoint(self):
        """Matrix-free gradient with the param source inside the sub-Group."""
        group = Group()
        group.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])
        prob = Problem()
        prob.root = Group()
        prob.root.add('sub', group, promotes=['*'])
        prob.root.sub.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
        J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
        J = prob.calc_gradient(['x'], ['y'], mode='fd', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
        J = prob.calc_gradient(['x'], ['y'], mode='fd', return_format='array')
        assert_rel_error(self, J[0][0], 2.0, 1e-6)
    def test_array2D(self):
        """Gradient of a 2x2 array component matches its cached Jacobian."""
        group = Group()
        group.add('x_param', IndepVarComp('x', np.ones((2, 2))), promotes=['*'])
        group.add('mycomp', ArrayComp2D(), promotes=['x', 'y'])
        prob = Problem()
        prob.root = group
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
        Jbase = prob.root.mycomp._jacobian_cache
        diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])
        assert_rel_error(self, diff, 0.0, 1e-8)
        J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
        diff = np.linalg.norm(J['y']['x'] - Jbase['y', 'x'])
        assert_rel_error(self, diff, 0.0, 1e-8)
    def test_double_arraycomp(self):
        """Array-format gradient return with multiple array inputs/outputs."""
        # Mainly testing a bug in the array return for multiple arrays
        group = Group()
        group.add('x_param1', IndepVarComp('x1', np.ones((2))), promotes=['*'])
        group.add('x_param2', IndepVarComp('x2', np.ones((2))), promotes=['*'])
        group.add('mycomp', DoubleArrayComp(), promotes=['*'])
        prob = Problem()
        prob.root = group
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        Jbase = group.mycomp.JJ
        J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fwd',
                              return_format='array')
        diff = np.linalg.norm(J - Jbase)
        assert_rel_error(self, diff, 0.0, 1e-8)
        J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='fd',
                              return_format='array')
        diff = np.linalg.norm(J - Jbase)
        assert_rel_error(self, diff, 0.0, 1e-8)
        J = prob.calc_gradient(['x1', 'x2'], ['y1', 'y2'], mode='rev',
                              return_format='array')
        diff = np.linalg.norm(J - Jbase)
        assert_rel_error(self, diff, 0.0, 1e-8)
    def test_simple_in_group_matvec(self):
        """Matrix-free gradient when only the component lives in a sub-Group."""
        group = Group()
        sub = group.add('sub', Group(), promotes=['x', 'y'])
        group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
        sub.add('mycomp', SimpleCompDerivMatVec(), promotes=['x', 'y'])
        prob = Problem()
        prob.root = group
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
        J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
    def test_simple_jac(self):
        """Gradient of y = 2*x expressed as an ExecComp."""
        group = Group()
        group.add('x_param', IndepVarComp('x', 1.0), promotes=['*'])
        group.add('mycomp', ExecComp(['y=2.0*x']), promotes=['x', 'y'])
        prob = Problem()
        prob.root = group
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        J = prob.calc_gradient(['x'], ['y'], mode='fwd', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
        J = prob.calc_gradient(['x'], ['y'], mode='rev', return_format='dict')
        assert_rel_error(self, J['y']['x'][0][0], 2.0, 1e-6)
    def test_fan_out(self):
        """Gradients through a fan-out topology (one input, two outputs)."""
        prob = Problem()
        prob.root = FanOut()
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        indep_list = ['p.x']
        unknown_list = ['comp2.y', "comp3.y"]
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['comp3.y']['p.x'][0][0], 15.0, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['comp3.y']['p.x'][0][0], 15.0, 1e-6)
    def test_fan_out_grouped(self):
        """Fan-out gradients with the downstream components in a sub-Group."""
        prob = Problem()
        prob.root = FanOutGrouped()
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        indep_list = ['p.x']
        unknown_list = ['sub.comp2.y', "sub.comp3.y"]
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['sub.comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['sub.comp3.y']['p.x'][0][0], 15.0, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['sub.comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['sub.comp3.y']['p.x'][0][0], 15.0, 1e-6)
    def test_fan_in(self):
        """Gradients through a fan-in topology (two inputs, one output)."""
        prob = Problem()
        prob.root = FanIn()
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        indep_list = ['p1.x1', 'p2.x2']
        unknown_list = ['comp3.y']
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)
    def test_fan_in_grouped(self):
        """Fan-in gradients with the upstream components in a sub-Group."""
        prob = Problem()
        prob.root = FanInGrouped()
        prob.root.ln_solver = ScipyGMRES()
        indep_list = ['p1.x1', 'p2.x2']
        unknown_list = ['comp3.y']
        prob.setup(check=False)
        prob.run()
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-6)
    def test_converge_diverge(self):
        """Gradients through a converge-diverge topology in all three modes."""
        prob = Problem()
        prob.root = ConvergeDiverge()
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        indep_list = ['p.x']
        unknown_list = ['comp7.y1']
        prob.run()
        # Make sure value is fine.
        assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
    def test_analysis_error(self):
        """With err_on_maxiter, hitting maxiter raises AnalysisError."""
        prob = Problem()
        prob.root = ConvergeDiverge()
        prob.root.ln_solver = ScipyGMRES()
        prob.root.ln_solver.options['maxiter'] = 2
        prob.root.ln_solver.options['err_on_maxiter'] = True
        prob.setup(check=False)
        prob.run()
        indep_list = ['p.x']
        unknown_list = ['comp7.y1']
        prob.run()
        # Make sure value is fine.
        assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)
        try:
            J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        except AnalysisError as err:
            self.assertEqual(str(err), "Solve in '': ScipyGMRES failed to converge after 2 iterations")
        else:
            self.fail("expected AnalysisError")
    def test_converge_diverge_groups(self):
        """Converge-diverge gradients with components nested in Groups."""
        prob = Problem()
        prob.root = ConvergeDivergeGroups()
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        # Make sure value is fine.
        assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)
        indep_list = ['p.x']
        unknown_list = ['comp7.y1']
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
    def test_single_diamond(self):
        """Gradients through a diamond topology with two outputs."""
        prob = Problem()
        prob.root = SingleDiamond()
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        indep_list = ['p.x']
        unknown_list = ['comp4.y1', 'comp4.y2']
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
        assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
        assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)
    def test_single_diamond_grouped(self):
        """Diamond-topology gradients with the model nested in Groups."""
        prob = Problem()
        prob.root = SingleDiamondGrouped()
        prob.root.ln_solver = ScipyGMRES()
        prob.setup(check=False)
        prob.run()
        indep_list = ['p.x']
        unknown_list = ['comp4.y1', 'comp4.y2']
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
        assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
        assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
        assert_rel_error(self, J['comp4.y1']['p.x'][0][0], 25, 1e-6)
        assert_rel_error(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-6)
    def test_sellar_derivs_grouped(self):
        """Full Sellar MDA gradients against known reference values."""
        prob = Problem()
        prob.root = SellarDerivativesGrouped()
        prob.root.mda.nl_solver.options['atol'] = 1e-12
        prob.setup(check=False)
        prob.run()
        # Just make sure we are at the right answer
        assert_rel_error(self, prob['y1'], 25.58830273, .00001)
        assert_rel_error(self, prob['y2'], 12.05848819, .00001)
        indep_list = ['x', 'z']
        unknown_list = ['obj', 'con1', 'con2']
        Jbase = {}
        Jbase['con1'] = {}
        Jbase['con1']['x'] = -0.98061433
        Jbase['con1']['z'] = np.array([-9.61002285, -0.78449158])
        Jbase['con2'] = {}
        Jbase['con2']['x'] = 0.09692762
        Jbase['con2']['z'] = np.array([1.94989079, 1.0775421 ])
        Jbase['obj'] = {}
        Jbase['obj']['x'] = 2.98061392
        Jbase['obj']['z'] = np.array([9.61001155, 1.78448534])
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        for key1, val1 in Jbase.items():
            for key2, val2 in val1.items():
                assert_rel_error(self, J[key1][key2], val2, .00001)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        for key1, val1 in Jbase.items():
            for key2, val2 in val1.items():
                assert_rel_error(self, J[key1][key2], val2, .00001)
        # Cheat a bit so I can twiddle mode
        OptionsDictionary.locked = False
        prob.root.deriv_options['form'] = 'central'
        J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
        for key1, val1 in Jbase.items():
            for key2, val2 in val1.items():
                assert_rel_error(self, J[key1][key2], val2, .00001)
class TestScipyGMRESPreconditioner(unittest.TestCase):
    """Exercise ScipyGMRES with a LinearGaussSeidel preconditioner.

    Subsystems additionally get DirectSolver linear solvers; gradients are
    checked against the same reference values as the unpreconditioned tests.
    """
    def test_sellar_derivs_grouped_precon(self):
        """Sellar MDA gradients with preconditioned GMRES and a direct MDA solve."""
        prob = Problem()
        prob.root = SellarDerivativesGrouped()
        prob.root.mda.nl_solver.options['atol'] = 1e-12
        prob.root.ln_solver.preconditioner = LinearGaussSeidel()
        prob.root.mda.ln_solver = DirectSolver()
        prob.setup(check=False)
        prob.run()
        # Just make sure we are at the right answer
        assert_rel_error(self, prob['y1'], 25.58830273, .00001)
        assert_rel_error(self, prob['y2'], 12.05848819, .00001)
        indep_list = ['x', 'z']
        unknown_list = ['obj', 'con1', 'con2']
        Jbase = {}
        Jbase['con1'] = {}
        Jbase['con1']['x'] = -0.98061433
        Jbase['con1']['z'] = np.array([-9.61002285, -0.78449158])
        Jbase['con2'] = {}
        Jbase['con2']['x'] = 0.09692762
        Jbase['con2']['z'] = np.array([1.94989079, 1.0775421 ])
        Jbase['obj'] = {}
        Jbase['obj']['x'] = 2.98061392
        Jbase['obj']['z'] = np.array([9.61001155, 1.78448534])
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        for key1, val1 in Jbase.items():
            for key2, val2 in val1.items():
                assert_rel_error(self, J[key1][key2], val2, .00001)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        for key1, val1 in Jbase.items():
            for key2, val2 in val1.items():
                assert_rel_error(self, J[key1][key2], val2, .00001)
    def test_converge_diverge_groups(self):
        """Converge-diverge gradients with preconditioner and direct sub-solvers."""
        prob = Problem()
        prob.root = ConvergeDivergeGroups()
        prob.root.ln_solver = ScipyGMRES()
        prob.root.ln_solver.preconditioner = LinearGaussSeidel()
        prob.root.sub1.ln_solver = DirectSolver()
        prob.root.sub3.ln_solver = DirectSolver()
        prob.setup(check=False)
        prob.run()
        # Make sure value is fine.
        assert_rel_error(self, prob['comp7.y1'], -102.7, 1e-6)
        indep_list = ['p.x']
        unknown_list = ['comp7.y1']
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='fd', return_format='dict')
        assert_rel_error(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-6)
    def test_fan_out_all_grouped(self):
        """Fan-out gradients with every sub-Group using a DirectSolver."""
        prob = Problem()
        prob.root = FanOutAllGrouped()
        prob.root.ln_solver = ScipyGMRES()
        prob.root.ln_solver.preconditioner = LinearGaussSeidel()
        prob.root.sub1.ln_solver = DirectSolver()
        prob.root.sub2.ln_solver = DirectSolver()
        prob.root.sub3.ln_solver = DirectSolver()
        prob.setup(check=False)
        prob.run()
        indep_list = ['p.x']
        unknown_list = ['sub2.comp2.y', "sub3.comp3.y"]
        J = prob.calc_gradient(indep_list, unknown_list, mode='fwd', return_format='dict')
        assert_rel_error(self, J['sub2.comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['sub3.comp3.y']['p.x'][0][0], 15.0, 1e-6)
        J = prob.calc_gradient(indep_list, unknown_list, mode='rev', return_format='dict')
        assert_rel_error(self, J['sub2.comp2.y']['p.x'][0][0], -6.0, 1e-6)
        assert_rel_error(self, J['sub3.comp3.y']['p.x'][0][0], 15.0, 1e-6)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| [
"openmdao.api.ExecComp",
"openmdao.test.simple_comps.DoubleArrayComp",
"openmdao.test.converge_diverge.ConvergeDiverge",
"openmdao.test.simple_comps.FanInGrouped",
"numpy.ones",
"numpy.linalg.norm",
"openmdao.api.Group",
"unittest.main",
"openmdao.api.IndepVarComp",
"openmdao.test.simple_comps.Sim... | [((19143, 19158), 'unittest.main', 'unittest.main', ([], {}), '()\n', (19156, 19158), False, 'import unittest\n'), ((871, 878), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (876, 878), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((1038, 1047), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (1045, 1047), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((1104, 1116), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (1114, 1116), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((1256, 1309), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (1272, 1309), False, 'from openmdao.test.util import assert_rel_error\n'), ((1397, 1450), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (1413, 1450), False, 'from openmdao.test.util import assert_rel_error\n'), ((1508, 1515), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (1513, 1515), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((1606, 1615), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (1613, 1615), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((1636, 1643), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (1641, 1643), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((1800, 1812), 'openmdao.api.ScipyGMRES', 
'ScipyGMRES', ([], {}), '()\n', (1810, 1812), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((1952, 2005), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (1968, 2005), False, 'from openmdao.test.util import assert_rel_error\n'), ((2093, 2146), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (2109, 2146), False, 'from openmdao.test.util import assert_rel_error\n'), ((2233, 2286), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (2249, 2286), False, 'from openmdao.test.util import assert_rel_error\n'), ((2360, 2367), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (2365, 2367), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((2458, 2467), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (2465, 2467), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((2488, 2495), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (2493, 2495), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((2656, 2668), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (2666, 2668), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((2808, 2861), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (2824, 2861), False, 
'from openmdao.test.util import assert_rel_error\n'), ((2949, 3002), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (2965, 3002), False, 'from openmdao.test.util import assert_rel_error\n'), ((3089, 3142), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (3105, 3142), False, 'from openmdao.test.util import assert_rel_error\n'), ((3230, 3273), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'J[0][0]', '(2.0)', '(1e-06)'], {}), '(self, J[0][0], 2.0, 1e-06)\n', (3246, 3273), False, 'from openmdao.test.util import assert_rel_error\n'), ((3318, 3325), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (3323, 3325), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((3487, 3496), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (3494, 3496), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((3553, 3565), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (3563, 3565), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((3761, 3806), 'numpy.linalg.norm', 'np.linalg.norm', (["(J['y']['x'] - Jbase['y', 'x'])"], {}), "(J['y']['x'] - Jbase['y', 'x'])\n", (3775, 3806), True, 'import numpy as np\n'), ((3815, 3855), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'diff', '(0.0)', '(1e-08)'], {}), '(self, diff, 0.0, 1e-08)\n', (3831, 3855), False, 'from openmdao.test.util import assert_rel_error\n'), ((3950, 3995), 'numpy.linalg.norm', 'np.linalg.norm', (["(J['y']['x'] - Jbase['y', 'x'])"], {}), "(J['y']['x'] - Jbase['y', 'x'])\n", (3964, 3995), True, 
'import numpy as np\n'), ((4004, 4044), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'diff', '(0.0)', '(1e-08)'], {}), '(self, diff, 0.0, 1e-08)\n', (4020, 4044), False, 'from openmdao.test.util import assert_rel_error\n'), ((4170, 4177), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (4175, 4177), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((4417, 4426), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (4424, 4426), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((4483, 4495), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (4493, 4495), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((4721, 4746), 'numpy.linalg.norm', 'np.linalg.norm', (['(J - Jbase)'], {}), '(J - Jbase)\n', (4735, 4746), True, 'import numpy as np\n'), ((4755, 4795), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'diff', '(0.0)', '(1e-08)'], {}), '(self, diff, 0.0, 1e-08)\n', (4771, 4795), False, 'from openmdao.test.util import assert_rel_error\n'), ((4935, 4960), 'numpy.linalg.norm', 'np.linalg.norm', (['(J - Jbase)'], {}), '(J - Jbase)\n', (4949, 4960), True, 'import numpy as np\n'), ((4969, 5009), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'diff', '(0.0)', '(1e-08)'], {}), '(self, diff, 0.0, 1e-08)\n', (4985, 5009), False, 'from openmdao.test.util import assert_rel_error\n'), ((5150, 5175), 'numpy.linalg.norm', 'np.linalg.norm', (['(J - Jbase)'], {}), '(J - Jbase)\n', (5164, 5175), True, 'import numpy as np\n'), ((5184, 5224), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'diff', '(0.0)', '(1e-08)'], {}), '(self, diff, 0.0, 1e-08)\n', (5200, 5224), False, 'from openmdao.test.util import assert_rel_error\n'), 
((5284, 5291), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (5289, 5291), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((5510, 5519), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (5517, 5519), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((5576, 5588), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (5586, 5588), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((5728, 5781), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (5744, 5781), False, 'from openmdao.test.util import assert_rel_error\n'), ((5869, 5922), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (5885, 5922), False, 'from openmdao.test.util import assert_rel_error\n'), ((5970, 5977), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (5975, 5977), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((6135, 6144), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (6142, 6144), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((6201, 6213), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (6211, 6213), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((6353, 6406), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (6369, 
6406), False, 'from openmdao.test.util import assert_rel_error\n'), ((6494, 6547), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['y']['x'][0][0]", '(2.0)', '(1e-06)'], {}), "(self, J['y']['x'][0][0], 2.0, 1e-06)\n", (6510, 6547), False, 'from openmdao.test.util import assert_rel_error\n'), ((6592, 6601), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (6599, 6601), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((6622, 6630), 'openmdao.test.simple_comps.FanOut', 'FanOut', ([], {}), '()\n', (6628, 6630), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((6661, 6673), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (6671, 6673), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((6901, 6963), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp2.y']['p.x'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['comp2.y']['p.x'][0][0], -6.0, 1e-06)\n", (6917, 6963), False, 'from openmdao.test.util import assert_rel_error\n'), ((6971, 7033), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p.x'][0][0]", '(15.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p.x'][0][0], 15.0, 1e-06)\n", (6987, 7033), False, 'from openmdao.test.util import assert_rel_error\n'), ((7133, 7195), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp2.y']['p.x'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['comp2.y']['p.x'][0][0], -6.0, 1e-06)\n", (7149, 7195), False, 'from openmdao.test.util import assert_rel_error\n'), ((7203, 7265), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p.x'][0][0]", '(15.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p.x'][0][0], 
15.0, 1e-06)\n", (7219, 7265), False, 'from openmdao.test.util import assert_rel_error\n'), ((7318, 7327), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (7325, 7327), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((7348, 7363), 'openmdao.test.simple_comps.FanOutGrouped', 'FanOutGrouped', ([], {}), '()\n', (7361, 7363), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((7394, 7406), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (7404, 7406), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((7642, 7708), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub.comp2.y']['p.x'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['sub.comp2.y']['p.x'][0][0], -6.0, 1e-06)\n", (7658, 7708), False, 'from openmdao.test.util import assert_rel_error\n'), ((7716, 7782), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub.comp3.y']['p.x'][0][0]", '(15.0)', '(1e-06)'], {}), "(self, J['sub.comp3.y']['p.x'][0][0], 15.0, 1e-06)\n", (7732, 7782), False, 'from openmdao.test.util import assert_rel_error\n'), ((7882, 7948), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub.comp2.y']['p.x'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['sub.comp2.y']['p.x'][0][0], -6.0, 1e-06)\n", (7898, 7948), False, 'from openmdao.test.util import assert_rel_error\n'), ((7956, 8022), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub.comp3.y']['p.x'][0][0]", '(15.0)', '(1e-06)'], {}), "(self, J['sub.comp3.y']['p.x'][0][0], 15.0, 1e-06)\n", (7972, 8022), False, 'from openmdao.test.util import assert_rel_error\n'), ((8066, 8075), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (8073, 8075), False, 
'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((8096, 8103), 'openmdao.test.simple_comps.FanIn', 'FanIn', ([], {}), '()\n', (8101, 8103), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((8134, 8146), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (8144, 8146), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((8374, 8438), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p1.x1'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-06)\n", (8390, 8438), False, 'from openmdao.test.util import assert_rel_error\n'), ((8446, 8510), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p2.x2'][0][0]", '(35.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-06)\n", (8462, 8510), False, 'from openmdao.test.util import assert_rel_error\n'), ((8610, 8674), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p1.x1'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-06)\n", (8626, 8674), False, 'from openmdao.test.util import assert_rel_error\n'), ((8682, 8746), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p2.x2'][0][0]", '(35.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-06)\n", (8698, 8746), False, 'from openmdao.test.util import assert_rel_error\n'), ((8798, 8807), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (8805, 8807), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((8828, 8842), 'openmdao.test.simple_comps.FanInGrouped', 'FanInGrouped', ([], {}), 
'()\n', (8840, 8842), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((8873, 8885), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (8883, 8885), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((9114, 9178), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p1.x1'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-06)\n", (9130, 9178), False, 'from openmdao.test.util import assert_rel_error\n'), ((9186, 9250), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p2.x2'][0][0]", '(35.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-06)\n", (9202, 9250), False, 'from openmdao.test.util import assert_rel_error\n'), ((9350, 9414), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p1.x1'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p1.x1'][0][0], -6.0, 1e-06)\n", (9366, 9414), False, 'from openmdao.test.util import assert_rel_error\n'), ((9422, 9486), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp3.y']['p2.x2'][0][0]", '(35.0)', '(1e-06)'], {}), "(self, J['comp3.y']['p2.x2'][0][0], 35.0, 1e-06)\n", (9438, 9486), False, 'from openmdao.test.util import assert_rel_error\n'), ((9540, 9549), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (9547, 9549), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((9570, 9587), 'openmdao.test.converge_diverge.ConvergeDiverge', 'ConvergeDiverge', ([], {}), '()\n', (9585, 9587), False, 'from openmdao.test.converge_diverge import ConvergeDiverge, SingleDiamond, ConvergeDivergeGroups, SingleDiamondGrouped\n'), ((9618, 9630), 
'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (9628, 9630), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((9812, 9867), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "prob['comp7.y1']", '(-102.7)', '(1e-06)'], {}), "(self, prob['comp7.y1'], -102.7, 1e-06)\n", (9828, 9867), False, 'from openmdao.test.util import assert_rel_error\n'), ((9967, 10032), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp7.y1']['p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-06)\n", (9983, 10032), False, 'from openmdao.test.util import assert_rel_error\n'), ((10132, 10197), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp7.y1']['p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-06)\n", (10148, 10197), False, 'from openmdao.test.util import assert_rel_error\n'), ((10296, 10361), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp7.y1']['p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-06)\n", (10312, 10361), False, 'from openmdao.test.util import assert_rel_error\n'), ((10413, 10422), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (10420, 10422), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((10443, 10460), 'openmdao.test.converge_diverge.ConvergeDiverge', 'ConvergeDiverge', ([], {}), '()\n', (10458, 10460), False, 'from openmdao.test.converge_diverge import ConvergeDiverge, SingleDiamond, ConvergeDivergeGroups, SingleDiamondGrouped\n'), ((10491, 10503), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (10501, 10503), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), 
((10798, 10853), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "prob['comp7.y1']", '(-102.7)', '(1e-06)'], {}), "(self, prob['comp7.y1'], -102.7, 1e-06)\n", (10814, 10853), False, 'from openmdao.test.util import assert_rel_error\n'), ((11226, 11235), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (11233, 11235), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((11256, 11279), 'openmdao.test.converge_diverge.ConvergeDivergeGroups', 'ConvergeDivergeGroups', ([], {}), '()\n', (11277, 11279), False, 'from openmdao.test.converge_diverge import ConvergeDiverge, SingleDiamond, ConvergeDivergeGroups, SingleDiamondGrouped\n'), ((11310, 11322), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (11320, 11322), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((11418, 11473), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "prob['comp7.y1']", '(-102.7)', '(1e-06)'], {}), "(self, prob['comp7.y1'], -102.7, 1e-06)\n", (11434, 11473), False, 'from openmdao.test.util import assert_rel_error\n'), ((11639, 11704), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp7.y1']['p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-06)\n", (11655, 11704), False, 'from openmdao.test.util import assert_rel_error\n'), ((11804, 11869), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp7.y1']['p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-06)\n", (11820, 11869), False, 'from openmdao.test.util import assert_rel_error\n'), ((11968, 12033), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp7.y1']['p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-06)\n", (11984, 12033), 
False, 'from openmdao.test.util import assert_rel_error\n'), ((12085, 12094), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (12092, 12094), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((12115, 12130), 'openmdao.test.converge_diverge.SingleDiamond', 'SingleDiamond', ([], {}), '()\n', (12128, 12130), False, 'from openmdao.test.converge_diverge import ConvergeDiverge, SingleDiamond, ConvergeDivergeGroups, SingleDiamondGrouped\n'), ((12161, 12173), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (12171, 12173), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((12403, 12464), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp4.y1']['p.x'][0][0]", '(25)', '(1e-06)'], {}), "(self, J['comp4.y1']['p.x'][0][0], 25, 1e-06)\n", (12419, 12464), False, 'from openmdao.test.util import assert_rel_error\n'), ((12472, 12536), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp4.y2']['p.x'][0][0]", '(-40.5)', '(1e-06)'], {}), "(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-06)\n", (12488, 12536), False, 'from openmdao.test.util import assert_rel_error\n'), ((12636, 12697), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp4.y1']['p.x'][0][0]", '(25)', '(1e-06)'], {}), "(self, J['comp4.y1']['p.x'][0][0], 25, 1e-06)\n", (12652, 12697), False, 'from openmdao.test.util import assert_rel_error\n'), ((12705, 12769), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp4.y2']['p.x'][0][0]", '(-40.5)', '(1e-06)'], {}), "(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-06)\n", (12721, 12769), False, 'from openmdao.test.util import assert_rel_error\n'), ((12829, 12838), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (12836, 12838), False, 'from openmdao.api import Group, Problem, IndepVarComp, 
ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((12859, 12881), 'openmdao.test.converge_diverge.SingleDiamondGrouped', 'SingleDiamondGrouped', ([], {}), '()\n', (12879, 12881), False, 'from openmdao.test.converge_diverge import ConvergeDiverge, SingleDiamond, ConvergeDivergeGroups, SingleDiamondGrouped\n'), ((12912, 12924), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (12922, 12924), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((13154, 13215), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp4.y1']['p.x'][0][0]", '(25)', '(1e-06)'], {}), "(self, J['comp4.y1']['p.x'][0][0], 25, 1e-06)\n", (13170, 13215), False, 'from openmdao.test.util import assert_rel_error\n'), ((13223, 13287), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp4.y2']['p.x'][0][0]", '(-40.5)', '(1e-06)'], {}), "(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-06)\n", (13239, 13287), False, 'from openmdao.test.util import assert_rel_error\n'), ((13387, 13448), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp4.y1']['p.x'][0][0]", '(25)', '(1e-06)'], {}), "(self, J['comp4.y1']['p.x'][0][0], 25, 1e-06)\n", (13403, 13448), False, 'from openmdao.test.util import assert_rel_error\n'), ((13456, 13520), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp4.y2']['p.x'][0][0]", '(-40.5)', '(1e-06)'], {}), "(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-06)\n", (13472, 13520), False, 'from openmdao.test.util import assert_rel_error\n'), ((13619, 13680), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp4.y1']['p.x'][0][0]", '(25)', '(1e-06)'], {}), "(self, J['comp4.y1']['p.x'][0][0], 25, 1e-06)\n", (13635, 13680), False, 'from openmdao.test.util import assert_rel_error\n'), ((13688, 13752), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', 
(['self', "J['comp4.y2']['p.x'][0][0]", '(-40.5)', '(1e-06)'], {}), "(self, J['comp4.y2']['p.x'][0][0], -40.5, 1e-06)\n", (13704, 13752), False, 'from openmdao.test.util import assert_rel_error\n'), ((13811, 13820), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (13818, 13820), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((13841, 13867), 'openmdao.test.sellar.SellarDerivativesGrouped', 'SellarDerivativesGrouped', ([], {}), '()\n', (13865, 13867), False, 'from openmdao.test.sellar import SellarDerivativesGrouped\n'), ((14037, 14091), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "prob['y1']", '(25.58830273)', '(1e-05)'], {}), "(self, prob['y1'], 25.58830273, 1e-05)\n", (14053, 14091), False, 'from openmdao.test.util import assert_rel_error\n'), ((14101, 14155), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "prob['y2']", '(12.05848819)', '(1e-05)'], {}), "(self, prob['y2'], 12.05848819, 1e-05)\n", (14117, 14155), False, 'from openmdao.test.util import assert_rel_error\n'), ((14354, 14390), 'numpy.array', 'np.array', (['[-9.61002285, -0.78449158]'], {}), '([-9.61002285, -0.78449158])\n', (14362, 14390), True, 'import numpy as np\n'), ((14487, 14520), 'numpy.array', 'np.array', (['[1.94989079, 1.0775421]'], {}), '([1.94989079, 1.0775421])\n', (14495, 14520), True, 'import numpy as np\n'), ((14615, 14649), 'numpy.array', 'np.array', (['[9.61001155, 1.78448534]'], {}), '([9.61001155, 1.78448534])\n', (14623, 14649), True, 'import numpy as np\n'), ((15645, 15654), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (15652, 15654), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((15675, 15701), 'openmdao.test.sellar.SellarDerivativesGrouped', 'SellarDerivativesGrouped', ([], {}), '()\n', (15699, 15701), False, 'from openmdao.test.sellar 
import SellarDerivativesGrouped\n'), ((15804, 15823), 'openmdao.api.LinearGaussSeidel', 'LinearGaussSeidel', ([], {}), '()\n', (15821, 15823), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((15858, 15872), 'openmdao.api.DirectSolver', 'DirectSolver', ([], {}), '()\n', (15870, 15872), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((15985, 16039), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "prob['y1']", '(25.58830273)', '(1e-05)'], {}), "(self, prob['y1'], 25.58830273, 1e-05)\n", (16001, 16039), False, 'from openmdao.test.util import assert_rel_error\n'), ((16049, 16103), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "prob['y2']", '(12.05848819)', '(1e-05)'], {}), "(self, prob['y2'], 12.05848819, 1e-05)\n", (16065, 16103), False, 'from openmdao.test.util import assert_rel_error\n'), ((16302, 16338), 'numpy.array', 'np.array', (['[-9.61002285, -0.78449158]'], {}), '([-9.61002285, -0.78449158])\n', (16310, 16338), True, 'import numpy as np\n'), ((16435, 16468), 'numpy.array', 'np.array', (['[1.94989079, 1.0775421]'], {}), '([1.94989079, 1.0775421])\n', (16443, 16468), True, 'import numpy as np\n'), ((16563, 16597), 'numpy.array', 'np.array', (['[9.61001155, 1.78448534]'], {}), '([9.61001155, 1.78448534])\n', (16571, 16597), True, 'import numpy as np\n'), ((17149, 17158), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (17156, 17158), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((17179, 17202), 'openmdao.test.converge_diverge.ConvergeDivergeGroups', 'ConvergeDivergeGroups', ([], {}), '()\n', (17200, 17202), False, 'from openmdao.test.converge_diverge import ConvergeDiverge, SingleDiamond, ConvergeDivergeGroups, SingleDiamondGrouped\n'), ((17233, 
17245), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (17243, 17245), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((17291, 17310), 'openmdao.api.LinearGaussSeidel', 'LinearGaussSeidel', ([], {}), '()\n', (17308, 17310), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((17347, 17361), 'openmdao.api.DirectSolver', 'DirectSolver', ([], {}), '()\n', (17359, 17361), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((17397, 17411), 'openmdao.api.DirectSolver', 'DirectSolver', ([], {}), '()\n', (17409, 17411), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((17508, 17563), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "prob['comp7.y1']", '(-102.7)', '(1e-06)'], {}), "(self, prob['comp7.y1'], -102.7, 1e-06)\n", (17524, 17563), False, 'from openmdao.test.util import assert_rel_error\n'), ((17729, 17794), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp7.y1']['p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-06)\n", (17745, 17794), False, 'from openmdao.test.util import assert_rel_error\n'), ((17894, 17959), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp7.y1']['p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-06)\n", (17910, 17959), False, 'from openmdao.test.util import assert_rel_error\n'), ((18058, 18123), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['comp7.y1']['p.x'][0][0]", '(-40.75)', '(1e-06)'], {}), "(self, J['comp7.y1']['p.x'][0][0], -40.75, 1e-06)\n", (18074, 18123), False, 'from openmdao.test.util import 
assert_rel_error\n'), ((18180, 18189), 'openmdao.api.Problem', 'Problem', ([], {}), '()\n', (18187, 18189), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((18210, 18228), 'openmdao.test.simple_comps.FanOutAllGrouped', 'FanOutAllGrouped', ([], {}), '()\n', (18226, 18228), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((18259, 18271), 'openmdao.api.ScipyGMRES', 'ScipyGMRES', ([], {}), '()\n', (18269, 18271), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((18318, 18337), 'openmdao.api.LinearGaussSeidel', 'LinearGaussSeidel', ([], {}), '()\n', (18335, 18337), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((18373, 18387), 'openmdao.api.DirectSolver', 'DirectSolver', ([], {}), '()\n', (18385, 18387), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((18423, 18437), 'openmdao.api.DirectSolver', 'DirectSolver', ([], {}), '()\n', (18435, 18437), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((18473, 18487), 'openmdao.api.DirectSolver', 'DirectSolver', ([], {}), '()\n', (18485, 18487), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((18726, 18793), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub2.comp2.y']['p.x'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['sub2.comp2.y']['p.x'][0][0], -6.0, 1e-06)\n", (18742, 18793), False, 'from openmdao.test.util import assert_rel_error\n'), ((18801, 18868), 
'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub3.comp3.y']['p.x'][0][0]", '(15.0)', '(1e-06)'], {}), "(self, J['sub3.comp3.y']['p.x'][0][0], 15.0, 1e-06)\n", (18817, 18868), False, 'from openmdao.test.util import assert_rel_error\n'), ((18968, 19035), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub2.comp2.y']['p.x'][0][0]", '(-6.0)', '(1e-06)'], {}), "(self, J['sub2.comp2.y']['p.x'][0][0], -6.0, 1e-06)\n", (18984, 19035), False, 'from openmdao.test.util import assert_rel_error\n'), ((19043, 19110), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', "J['sub3.comp3.y']['p.x'][0][0]", '(15.0)', '(1e-06)'], {}), "(self, J['sub3.comp3.y']['p.x'][0][0], 15.0, 1e-06)\n", (19059, 19110), False, 'from openmdao.test.util import assert_rel_error\n'), ((908, 930), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(1.0)'], {}), "('x', 1.0)\n", (920, 930), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((976, 999), 'openmdao.test.simple_comps.SimpleCompDerivMatVec', 'SimpleCompDerivMatVec', ([], {}), '()\n', (997, 999), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((1544, 1567), 'openmdao.test.simple_comps.SimpleCompDerivMatVec', 'SimpleCompDerivMatVec', ([], {}), '()\n', (1565, 1567), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((1677, 1699), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(1.0)'], {}), "('x', 1.0)\n", (1689, 1699), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((2396, 2419), 'openmdao.test.simple_comps.SimpleCompDerivMatVec', 'SimpleCompDerivMatVec', 
([], {}), '()\n', (2417, 2419), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((2585, 2607), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(1.0)'], {}), "('x', 1.0)\n", (2597, 2607), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((3435, 3448), 'openmdao.test.simple_comps.ArrayComp2D', 'ArrayComp2D', ([], {}), '()\n', (3446, 3448), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((4366, 4383), 'openmdao.test.simple_comps.DoubleArrayComp', 'DoubleArrayComp', ([], {}), '()\n', (4381, 4383), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((5323, 5330), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (5328, 5330), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((5382, 5404), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(1.0)'], {}), "('x', 1.0)\n", (5394, 5404), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((5448, 5471), 'openmdao.test.simple_comps.SimpleCompDerivMatVec', 'SimpleCompDerivMatVec', ([], {}), '()\n', (5469, 5471), False, 'from openmdao.test.simple_comps import SimpleCompDerivMatVec, FanOut, FanIn, FanOutGrouped, DoubleArrayComp, FanInGrouped, ArrayComp2D, FanOutAllGrouped\n'), ((6007, 6029), 'openmdao.api.IndepVarComp', 'IndepVarComp', (['"""x"""', '(1.0)'], {}), "('x', 1.0)\n", (6019, 6029), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, 
AnalysisError\n'), ((6075, 6096), 'openmdao.api.ExecComp', 'ExecComp', (["['y=2.0*x']"], {}), "(['y=2.0*x'])\n", (6083, 6096), False, 'from openmdao.api import Group, Problem, IndepVarComp, ScipyGMRES, DirectSolver, ExecComp, LinearGaussSeidel, AnalysisError\n'), ((3373, 3388), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3380, 3388), True, 'import numpy as np\n'), ((4227, 4237), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4234, 4237), True, 'import numpy as np\n'), ((4307, 4317), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4314, 4317), True, 'import numpy as np\n'), ((14843, 14893), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'J[key1][key2]', 'val2', '(1e-05)'], {}), '(self, J[key1][key2], val2, 1e-05)\n', (14859, 14893), False, 'from openmdao.test.util import assert_rel_error\n'), ((15088, 15138), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'J[key1][key2]', 'val2', '(1e-05)'], {}), '(self, J[key1][key2], val2, 1e-05)\n', (15104, 15138), False, 'from openmdao.test.util import assert_rel_error\n'), ((15470, 15520), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'J[key1][key2]', 'val2', '(1e-05)'], {}), '(self, J[key1][key2], val2, 1e-05)\n', (15486, 15520), False, 'from openmdao.test.util import assert_rel_error\n'), ((16791, 16841), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'J[key1][key2]', 'val2', '(1e-05)'], {}), '(self, J[key1][key2], val2, 1e-05)\n', (16807, 16841), False, 'from openmdao.test.util import assert_rel_error\n'), ((17036, 17086), 'openmdao.test.util.assert_rel_error', 'assert_rel_error', (['self', 'J[key1][key2]', 'val2', '(1e-05)'], {}), '(self, J[key1][key2], val2, 1e-05)\n', (17052, 17086), False, 'from openmdao.test.util import assert_rel_error\n')] |
from npnlp import minimize
import numpy as np
# Absolute tolerance used by every optimality assertion in this module.
tol = 1e-6
def test_sqp1():
    """SQP with a linear equality constraint; the optimum sits at x0 = 1.

    Minimizes x0**4 + x1**2 - x0**2*x1 subject to the linear equality
    Aeq @ x = beq, then checks the solution, the gradient at the solution
    and the constraint multiplier.  (The unused `nil` helper array of the
    original was removed.)
    """
    def J(x):
        # Objective: scalar value wrapped in a 1-element array, as npnlp expects.
        return np.array([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])

    x0 = np.array([0.5, 3.0])
    out = minimize(J, x0, Aeq=np.array([[1, 0]]), beq=np.array([1]), method='SQP')

    assert abs(out['x'][0] - 1) < tol
    assert abs(out['x'][1] - 0.5) < tol
    assert abs(out['grad'][0] - 3) < tol
    assert abs(out['grad'][1] - 0) < tol
    assert abs(out['kkt'].equality_linear[0] + 3) < tol
def test_sqp2():
    """SQP with a linear inequality constraint; the optimum lies on the
    constraint boundary at x0 = -1 (nonzero multiplier asserted below).

    Same objective as test_sqp1; the unused `nil` array was removed.
    """
    def J(x):
        # Objective: scalar value wrapped in a 1-element array, as npnlp expects.
        return np.array([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])

    x0 = np.array([0.5, 3.0])
    out = minimize(J, x0, A=np.array([[1, 0]]), b=np.array([-1]), method='SQP')

    assert abs(out['x'][0] + 1) < tol
    assert abs(out['x'][1] - 0.5) < tol
    assert abs(out['grad'][0] + 3) < tol
    assert abs(out['grad'][1] - 0) < tol
    assert abs(out['kkt'].inequality_linear[0] - 3) < tol
def test_sqp3():
    """SQP with two nonlinear equality constraints.

    Checks the solution, gradient and both equality multipliers.  The
    unused `nil` array was removed.
    """
    def J(x):
        # Objective: scalar value wrapped in a 1-element array, as npnlp expects.
        return np.array([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])

    def eq_con(x, kkt):
        # Two nonlinear equality constraints, both enforced at the optimum.
        return np.array([1 - 2 * x[0] * x[1] / 3, (3 * x[0] ** 2 - 4 * x[1]) / 3 + 1])

    x0 = np.array([0.5, 3.0])
    out = minimize(J, x0, nonlconeq=eq_con, method='SQP')

    assert abs(out['x'][0] - 1) < tol
    assert abs(out['x'][1] - 1.5) < tol
    assert abs(out['grad'][0] - 1) < tol
    assert abs(out['grad'][1] - 2) < tol
    assert abs(out['kkt'].equality_nonlinear[0] - 2) < tol
    assert abs(out['kkt'].equality_nonlinear[1] - 0.5) < tol
def test_sqp4():
    """SQP with the constraints of test_sqp3 passed as nonlinear inequalities.

    Both constraints are active at the optimum (nonzero multipliers asserted),
    so the solution matches the equality-constrained case.  The constraint
    function was renamed from the misleading `eq_con` to `ineq_con`, and the
    unused `nil` array was removed.
    """
    def J(x):
        # Objective: scalar value wrapped in a 1-element array, as npnlp expects.
        return np.array([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])

    def ineq_con(x, l):
        # Two nonlinear inequality constraints, both active at the solution.
        return np.array([1 - 2 * x[0] * x[1] / 3, (3 * x[0] ** 2 - 4 * x[1]) / 3 + 1])

    x0 = np.array([0.5, 3.0])
    out = minimize(J, x0, nonlconineq=ineq_con, method='SQP')

    assert abs(out['x'][0] - 1) < tol
    assert abs(out['x'][1] - 1.5) < tol
    assert abs(out['grad'][0] - 1) < tol
    assert abs(out['grad'][1] - 2) < tol
    assert abs(out['kkt'].inequality_nonlinear[0] - 2) < tol
    assert abs(out['kkt'].inequality_nonlinear[1] - 0.5) < tol
| [
"numpy.array",
"npnlp.minimize"
] | [((168, 188), 'numpy.array', 'np.array', (['[0.5, 3.0]'], {}), '([0.5, 3.0])\n', (176, 188), True, 'import numpy as np\n'), ((199, 211), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (207, 211), True, 'import numpy as np\n'), ((620, 640), 'numpy.array', 'np.array', (['[0.5, 3.0]'], {}), '([0.5, 3.0])\n', (628, 640), True, 'import numpy as np\n'), ((651, 663), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (659, 663), True, 'import numpy as np\n'), ((1183, 1203), 'numpy.array', 'np.array', (['[0.5, 3.0]'], {}), '([0.5, 3.0])\n', (1191, 1203), True, 'import numpy as np\n'), ((1214, 1226), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1222, 1226), True, 'import numpy as np\n'), ((1237, 1284), 'npnlp.minimize', 'minimize', (['J', 'x0'], {'nonlconeq': 'eq_con', 'method': '"""SQP"""'}), "(J, x0, nonlconeq=eq_con, method='SQP')\n", (1245, 1284), False, 'from npnlp import minimize\n'), ((1785, 1805), 'numpy.array', 'np.array', (['[0.5, 3.0]'], {}), '([0.5, 3.0])\n', (1793, 1805), True, 'import numpy as np\n'), ((1816, 1828), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1824, 1828), True, 'import numpy as np\n'), ((1839, 1888), 'npnlp.minimize', 'minimize', (['J', 'x0'], {'nonlconineq': 'eq_con', 'method': '"""SQP"""'}), "(J, x0, nonlconineq=eq_con, method='SQP')\n", (1847, 1888), False, 'from npnlp import minimize\n'), ((105, 157), 'numpy.array', 'np.array', (['[x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]]'], {}), '([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])\n', (113, 157), True, 'import numpy as np\n'), ((557, 609), 'numpy.array', 'np.array', (['[x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]]'], {}), '([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])\n', (565, 609), True, 'import numpy as np\n'), ((1008, 1060), 'numpy.array', 'np.array', (['[x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]]'], {}), '([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])\n', (1016, 1060), True, 'import numpy as np\n'), ((1101, 1172), 'numpy.array', 'np.array', (['[1 - 2 * x[0] * x[1] / 3, (3 * 
x[0] ** 2 - 4 * x[1]) / 3 + 1]'], {}), '([1 - 2 * x[0] * x[1] / 3, (3 * x[0] ** 2 - 4 * x[1]) / 3 + 1])\n', (1109, 1172), True, 'import numpy as np\n'), ((1612, 1664), 'numpy.array', 'np.array', (['[x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]]'], {}), '([x[0] ** 4 + x[1] ** 2 - x[0] ** 2 * x[1]])\n', (1620, 1664), True, 'import numpy as np\n'), ((1703, 1774), 'numpy.array', 'np.array', (['[1 - 2 * x[0] * x[1] / 3, (3 * x[0] ** 2 - 4 * x[1]) / 3 + 1]'], {}), '([1 - 2 * x[0] * x[1] / 3, (3 * x[0] ** 2 - 4 * x[1]) / 3 + 1])\n', (1711, 1774), True, 'import numpy as np\n'), ((242, 260), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (250, 260), True, 'import numpy as np\n'), ((265, 278), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (273, 278), True, 'import numpy as np\n'), ((692, 710), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (700, 710), True, 'import numpy as np\n'), ((713, 727), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (721, 727), True, 'import numpy as np\n')] |
import numpy as np
from simba.similarities import dynamax_jaccard, avg_cosine
from simba.evaluation import evaluate, evaluate_multiple
from simba.core import embed
# Small on-disk word-embedding fixture; get_path is monkeypatched so that
# every embedding name resolves to this file.
EMBED_PATH_LARGE = "tests/fixtures/test_embed_large.txt"
# Two parallel sentence lists: element i of SENTENCES1 is paired with
# element i of SENTENCES2 as one similarity test case.
SENTENCES1 = ["In the jungle the mighty jungle",
              "The lion sleeps tonight",
              "Hush my darling do not fear my darling",
              "The lion sleeps tonight"]
SENTENCES2 = ["Near the village the peaceful village",
              "The lion sleeps tonight",
              "My little darling",
              "Do not fear my little darling"]
def patch_get_path(embedding, EMB_MAP):
    """Monkeypatch stand-in for simba.core.get_path: always return the small
    fixture embedding file, regardless of the requested embedding name."""
    return EMBED_PATH_LARGE
def test_end_to_end(monkeypatch):
    """Embed two sentences via the fixture vectors and check dynamax-jaccard."""
    monkeypatch.setattr("simba.core.get_path", patch_get_path)
    pair = ["In the jungle the mighty jungle", "The lion sleeps tonight"]
    tokenised = [s.split() for s in pair]
    x, y = embed(tokenised, embedding='test_large')
    score = dynamax_jaccard(x, y)
    assert np.isclose(score, 0.47254264, atol=1e-8)
def test_evaluate(monkeypatch):
    """evaluate() without gold scores yields similarities and a None correlation."""
    monkeypatch.setattr("simba.core.get_path", patch_get_path)
    emb_a = embed([s.split() for s in SENTENCES1], embedding='test_large')
    emb_b = embed([s.split() for s in SENTENCES2], embedding='test_large')
    want_scores = [0.53112996, 1.0, 0.65611832, 0.40853999]
    want_cor = None
    got_scores, got_cor = evaluate(emb_a, emb_b, dynamax_jaccard)
    assert np.allclose(want_scores, got_scores)
    assert want_cor == got_cor
def test_evaluate_gold(monkeypatch):
    """evaluate() with gold labels also returns a correlation score."""
    monkeypatch.setattr("simba.core.get_path", patch_get_path)
    gold = [3, 5, 4, 1]
    emb_a = embed([s.split() for s in SENTENCES1], embedding='test_large')
    emb_b = embed([s.split() for s in SENTENCES2], embedding='test_large')
    want_scores = [0.53112996, 1.0, 0.65611832, 0.40853999]
    want_cor = 0.91116447
    got_scores, got_cor = evaluate(emb_a, emb_b, dynamax_jaccard,
                                   gold_scores=gold)
    assert np.allclose(want_scores, got_scores)
    assert np.allclose(want_cor, got_cor)
def test_evaluate_multiple(monkeypatch):
    """evaluate_multiple() scores the same pairs under several similarity measures."""
    monkeypatch.setattr("simba.core.get_path", patch_get_path)
    emb_a = embed([s.split() for s in SENTENCES1], embedding='test_large')
    emb_b = embed([s.split() for s in SENTENCES2], embedding='test_large')
    results = evaluate_multiple(emb_a, emb_b,
                               [dynamax_jaccard, avg_cosine])
    want_cosine = [0.73374721, 1., 0.84151215, 0.66572127]
    want_dynamax = [0.53112996, 1., 0.65611832, 0.40853999]
    assert np.allclose(results['avg_cosine'][0], want_cosine)
    assert np.allclose(results['dynamax_jaccard'][0], want_dynamax)
| [
"simba.similarities.dynamax_jaccard",
"numpy.allclose",
"simba.evaluation.evaluate_multiple",
"simba.evaluation.evaluate"
] | [((1392, 1443), 'simba.evaluation.evaluate', 'evaluate', (['embeddings1', 'embeddings2', 'dynamax_jaccard'], {}), '(embeddings1, embeddings2, dynamax_jaccard)\n', (1400, 1443), False, 'from simba.evaluation import evaluate, evaluate_multiple\n'), ((1455, 1485), 'numpy.allclose', 'np.allclose', (['expected', 'output_'], {}), '(expected, output_)\n', (1466, 1485), True, 'import numpy as np\n'), ((2008, 2084), 'simba.evaluation.evaluate', 'evaluate', (['embeddings1', 'embeddings2', 'dynamax_jaccard'], {'gold_scores': 'gold_labels'}), '(embeddings1, embeddings2, dynamax_jaccard, gold_scores=gold_labels)\n', (2016, 2084), False, 'from simba.evaluation import evaluate, evaluate_multiple\n'), ((2131, 2161), 'numpy.allclose', 'np.allclose', (['expected', 'output_'], {}), '(expected, output_)\n', (2142, 2161), True, 'import numpy as np\n'), ((2173, 2210), 'numpy.allclose', 'np.allclose', (['expected_cor', 'output_cor'], {}), '(expected_cor, output_cor)\n', (2184, 2210), True, 'import numpy as np\n'), ((2540, 2614), 'simba.evaluation.evaluate_multiple', 'evaluate_multiple', (['embeddings1', 'embeddings2', '[dynamax_jaccard, avg_cosine]'], {}), '(embeddings1, embeddings2, [dynamax_jaccard, avg_cosine])\n', (2557, 2614), False, 'from simba.evaluation import evaluate, evaluate_multiple\n'), ((2777, 2822), 'numpy.allclose', 'np.allclose', (["scores['avg_cosine'][0]", 'exp1[0]'], {}), "(scores['avg_cosine'][0], exp1[0])\n", (2788, 2822), True, 'import numpy as np\n'), ((2834, 2884), 'numpy.allclose', 'np.allclose', (["scores['dynamax_jaccard'][0]", 'exp2[0]'], {}), "(scores['dynamax_jaccard'][0], exp2[0])\n", (2845, 2884), True, 'import numpy as np\n'), ((934, 955), 'simba.similarities.dynamax_jaccard', 'dynamax_jaccard', (['x', 'y'], {}), '(x, y)\n', (949, 955), False, 'from simba.similarities import dynamax_jaccard, avg_cosine\n')] |
import numpy as np
# Toy per-topic term distributions: one row per topic, one column per term.
# The script walks step by step through re-weighting each entry by how far
# its log value lies from the per-term mean of the logs across topics:
#     beta * (log(beta) - mean_over_topics(log(beta)))
betas = np.array([[0.3, 0.2, 0.5], [0.4, 0.2, 0.4], [0.3, 0.6, 0.1]])
size_vocab = betas.shape[1]
print(betas.T)
ntopics = betas.shape[0]
print(ntopics)

log_betas = np.log(betas)
# Per-term mean of log(beta) over the topic axis.
mean_log = log_betas.sum(axis=0) / ntopics

print("> 1")
print(betas)
print("> 2")
print(log_betas)
print("> 3")
print(log_betas.sum(axis=0))
print("> 4")
print(mean_log)
#betas = np.array([[0.1,0.2]])
print("> 5")
print(mean_log.reshape(size_vocab, 1))
deno = mean_log.reshape(size_vocab, 1)
print("> 6")
print(np.ones((ntopics, 1)))
print("> 7")
# Broadcast the per-term mean into a full (ntopics, size_vocab) matrix.
print(np.ones((ntopics, 1)) @ deno.T)
print("> 8")
print(betas * (np.log(betas) - deno))

betas_ds = np.copy(betas)
# Guard against log(0) for distributions containing (near-)zero entries.
if np.min(betas_ds) < 1e-12:
    betas_ds += 1e-12
deno = (np.log(betas_ds).sum(axis=0) / ntopics).reshape(size_vocab, 1)
deno = np.ones((ntopics, 1)) @ deno.T
betas_ds = betas_ds * (np.log(betas_ds) - deno)
#print(betas_ds) | [
"numpy.log",
"numpy.copy",
"numpy.ones",
"numpy.min",
"numpy.array"
] | [((28, 89), 'numpy.array', 'np.array', (['[[0.3, 0.2, 0.5], [0.4, 0.2, 0.4], [0.3, 0.6, 0.1]]'], {}), '([[0.3, 0.2, 0.5], [0.4, 0.2, 0.4], [0.3, 0.6, 0.1]])\n', (36, 89), True, 'import numpy as np\n'), ((650, 664), 'numpy.copy', 'np.copy', (['betas'], {}), '(betas)\n', (657, 664), True, 'import numpy as np\n'), ((213, 226), 'numpy.log', 'np.log', (['betas'], {}), '(betas)\n', (219, 226), True, 'import numpy as np\n'), ((504, 525), 'numpy.ones', 'np.ones', (['(ntopics, 1)'], {}), '((ntopics, 1))\n', (511, 525), True, 'import numpy as np\n'), ((668, 684), 'numpy.min', 'np.min', (['betas_ds'], {}), '(betas_ds)\n', (674, 684), True, 'import numpy as np\n'), ((251, 264), 'numpy.log', 'np.log', (['betas'], {}), '(betas)\n', (257, 264), True, 'import numpy as np\n'), ((791, 812), 'numpy.ones', 'np.ones', (['(ntopics, 1)'], {}), '((ntopics, 1))\n', (798, 812), True, 'import numpy as np\n'), ((850, 866), 'numpy.log', 'np.log', (['betas_ds'], {}), '(betas_ds)\n', (856, 866), True, 'import numpy as np\n'), ((290, 303), 'numpy.log', 'np.log', (['betas'], {}), '(betas)\n', (296, 303), True, 'import numpy as np\n'), ((444, 457), 'numpy.log', 'np.log', (['betas'], {}), '(betas)\n', (450, 457), True, 'import numpy as np\n'), ((547, 568), 'numpy.ones', 'np.ones', (['(ntopics, 1)'], {}), '((ntopics, 1))\n', (554, 568), True, 'import numpy as np\n'), ((611, 624), 'numpy.log', 'np.log', (['betas'], {}), '(betas)\n', (617, 624), True, 'import numpy as np\n'), ((740, 756), 'numpy.log', 'np.log', (['betas_ds'], {}), '(betas_ds)\n', (746, 756), True, 'import numpy as np\n'), ((380, 393), 'numpy.log', 'np.log', (['betas'], {}), '(betas)\n', (386, 393), True, 'import numpy as np\n')] |
from size_color import change_size_color
from skimage import io,transform,color
import numpy as np
import os
import cv2
# Batch-convert every image under J:\gt_db to a 64x64 grayscale JPEG in
# J:\new_data.  Improvements over the original: the per-pixel double loop
# (pure-Python O(rows*cols)) is replaced by a single vectorized multiply,
# and the builtin name `str` is no longer shadowed.
folderList = os.listdir('J:\\gt_db')
# folderList holds the name of every per-subject folder under J:\gt_db.
length = len(folderList)
AuImage_data = {}
for i in range(length):
    folderName = 'J:\\gt_db\\' + folderList[i]
    AuImgList = os.listdir(folderName)
    k = len(AuImgList)
    for m in range(k):
        # Absolute path of the source image file.
        fileName = folderName + '\\' + AuImgList[m]
        # Convert the image to grayscale and resize it to 64x64.
        AuImage_data[m] = change_size_color(fileName)
        # Rescale to 0-255 in one vectorized step.  Assumes the converted
        # image holds values in [0, 1] (the *255 of the original loop
        # implies this) -- TODO confirm against change_size_color.
        MidGrayPic = AuImage_data[m] * 255
        # Output name: subject folder name + original basename, saved as .jpg
        # ([:-4] strips the original extension).
        outPath = 'J:\\new_data\\' + folderList[i] + AuImgList[m][:-4] + '.jpg'
        cv2.imwrite(outPath, MidGrayPic)
| [
"cv2.imwrite",
"numpy.zeros",
"os.listdir",
"size_color.change_size_color"
] | [((132, 155), 'os.listdir', 'os.listdir', (['"""J:\\\\gt_db"""'], {}), "('J:\\\\gt_db')\n", (142, 155), False, 'import os\n'), ((353, 375), 'os.listdir', 'os.listdir', (['folderName'], {}), '(folderName)\n', (363, 375), False, 'import os\n'), ((530, 557), 'size_color.change_size_color', 'change_size_color', (['fileName'], {}), '(fileName)\n', (547, 557), False, 'from size_color import change_size_color\n'), ((788, 810), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (796, 810), True, 'import numpy as np\n'), ((1260, 1288), 'cv2.imwrite', 'cv2.imwrite', (['str', 'MidGrayPic'], {}), '(str, MidGrayPic)\n', (1271, 1288), False, 'import cv2\n')] |
import pytest
from .utils import digit_float
import numpy as np
# Number of target classes in the vowel data set (labels start at 1).
vowel_data_y_dimension = 11
@pytest.fixture
def vowel_data():
    """Provide (train_x, train_y, test_x, test_y, feature_names) of the vowel set."""
    from esl_model.datasets import VowelDataSet
    return VowelDataSet().return_all()
@pytest.fixture
def SAHeart_data():
    """Provide the unpacked contents of the SAHeart data set."""
    from esl_model.datasets import SAHeartDataSet
    return SAHeartDataSet().return_all()
def test_vowel_data():
    """Vowel labels start at 1 and select_features narrows the feature columns."""
    from esl_model.datasets import VowelDataSet
    data = VowelDataSet()
    assert list(data.train_y[:5]) == [1, 2, 3, 4, 5]

    # Selecting by feature name keeps only those columns, in order.
    data.select_features = data.feature_names[:2]
    assert np.array_equal(data.train_x[:1], data._train_x.iloc[:1, :2].values)

    # Selecting by column index behaves the same way.
    idx = [0, 1, 2]
    data.select_features = idx
    assert np.array_equal(data.train_x[:1], data._train_x.iloc[:1, idx].values)
def test_indicator_matrix(vowel_data):
    """Indicator-matrix linear regression: check train and test error rates."""
    from esl_model.ch4.models import LinearRegressionIndicatorMatrix
    train_x, train_y, test_x, test_y, features = vowel_data
    model = LinearRegressionIndicatorMatrix(train_x=train_x, train_y=train_y,
                                            n_class=vowel_data_y_dimension)
    model.pre_processing()
    model.train()
    print(model.error_rate)
    result = model.test(test_x, test_y)
    print(result.error_rate)
    assert digit_float(model.error_rate) == 0.477
    assert digit_float(result.error_rate) == 0.667
def test_LDA(vowel_data):
    """Linear discriminant analysis: check train and test error rates."""
    from esl_model.ch4.models import LDAModel
    train_x, train_y, test_x, test_y, features = vowel_data
    model = LDAModel(train_x=train_x, train_y=train_y,
                     n_class=vowel_data_y_dimension)
    model.pre_processing()
    model.train()
    print(model.y_hat[:10])
    print(model.error_rate)
    result = model.test(test_x, test_y)
    print(result.error_rate)
    assert digit_float(model.error_rate) == 0.316
    assert digit_float(result.error_rate) == 0.556
def test_QDA(vowel_data):
    """Quadratic discriminant analysis: check train and test error rates."""
    from esl_model.ch4.models import QDAModel
    train_x, train_y, test_x, test_y, features = vowel_data
    model = QDAModel(train_x=train_x, train_y=train_y,
                     n_class=vowel_data_y_dimension)
    model.pre_processing()
    model.train()
    print(model.y_hat[:10])
    print(model.error_rate)
    test_error = model.test(test_x, test_y).error_rate
    print(test_error)
    assert digit_float(model.error_rate) == 0.011
    assert digit_float(test_error) == 0.528
def test_RDA(vowel_data):
    """Regularized discriminant analysis with a reference alpha value."""
    from esl_model.ch4.models import RDAModel
    train_x, train_y, test_x, test_y, features = vowel_data
    # alpha taken from:
    # http://waxworksmath.com/Authors/G_M/Hastie/WriteUp/weatherwax_epstein_hastie_solutions_manual.pdf
    # pp 60
    model = RDAModel(train_x=train_x, train_y=train_y,
                     n_class=vowel_data_y_dimension, alpha=0.969697)
    model.pre_processing()
    model.train()
    print(model.error_rate)
    result = model.test(test_x, test_y)
    print(result.error_rate)
    assert digit_float(result.error_rate) == 0.478
def test_LDA_computation(vowel_data):
    """LDAForComputation must reproduce LDAModel's error rates exactly."""
    from esl_model.ch4.models import LDAForComputation, LDAModel
    train_x, train_y, test_x, test_y, features = vowel_data

    comp = LDAForComputation(train_x=train_x, train_y=train_y,
                              n_class=vowel_data_y_dimension)
    comp.pre_processing()
    comp.train()

    ref = LDAModel(train_x=train_x, train_y=train_y,
                   n_class=vowel_data_y_dimension)
    ref.pre_processing()
    ref.train()

    print(comp.error_rate)
    assert np.isclose(comp.error_rate, ref.error_rate)
    assert np.isclose(comp.test(test_x, test_y).error_rate,
                      ref.test(test_x, test_y).error_rate)
def test_RRLDA(vowel_data):
    """Reduced-rank LDA restricted to L=2 discriminant coordinates."""
    from esl_model.ch4.models import ReducedRankLDAModel
    train_x, train_y, test_x, test_y, features = vowel_data
    model = ReducedRankLDAModel(train_x=train_x, train_y=train_y,
                                n_class=vowel_data_y_dimension, L=2)
    model.pre_processing()
    model.train()
    print(model.y_hat[:5])
    print(model.error_rate)
    result = model.test(test_x, test_y)
    print(result.error_rate)
    assert digit_float(model.error_rate) == 0.350
    assert digit_float(result.error_rate) == 0.491
def test_SAHeart_data_set(SAHeart_data):
x, y, *_ = SAHeart_data
assert x[1, 2] == 4.41
assert list(y[:4]) == [1, 1, 0, 1]
def test_binary_logistic_regression(SAHeart_data):
    """Binary logistic regression on SAHeart: coefficients, standard errors
    and training error for a four-feature and a seven-feature model."""
    from esl_model.datasets import SAHeartDataSet
    from esl_model.ch4.models import BinaryLogisticRegression

    # Four-feature model.
    data = SAHeartDataSet(select_features=[1, 2, 4, 8])
    model = BinaryLogisticRegression(train_x=data.train_x, train_y=data.train_y,
                                     n_class=2, do_standardization=False)
    model.pre_processing()
    model.train()
    print(model.beta_hat)
    print(model.error_rate)
    print('yhat', model.y_hat[:5])
    print(repr(model.std_err))
    print('z score', model.z_score)

    expected_beta = np.array([[-4.20427542],
                              [0.08070059],
                              [0.16758415],
                              [0.92411669],
                              [0.04404247]])
    expected_std_err = np.array([0.498348, 0.02551477, 0.05418979,
                                 0.22318295, 0.00974321])
    assert np.allclose(model.beta_hat, expected_beta)
    assert digit_float(model.error_rate) == 0.268
    assert np.allclose(model.std_err, expected_std_err)

    # Seven-feature model.
    data = SAHeartDataSet(select_features=[0, 1, 2, 4, 6, 7, 8])
    model = BinaryLogisticRegression(train_x=data.train_x, train_y=data.train_y,
                                     n_class=2, do_standardization=False)
    model.pre_processing()
    model.train()
    assert digit_float(model.error_rate) == 0.271
| [
"esl_model.datasets.SAHeartDataSet",
"esl_model.ch4.models.LinearRegressionIndicatorMatrix",
"numpy.allclose",
"esl_model.ch4.models.RDAModel",
"esl_model.datasets.VowelDataSet",
"esl_model.ch4.models.QDAModel",
"numpy.isclose",
"esl_model.ch4.models.LDAForComputation",
"numpy.array",
"numpy.array... | [((189, 203), 'esl_model.datasets.VowelDataSet', 'VowelDataSet', ([], {}), '()\n', (201, 203), False, 'from esl_model.datasets import VowelDataSet\n'), ((332, 348), 'esl_model.datasets.SAHeartDataSet', 'SAHeartDataSet', ([], {}), '()\n', (346, 348), False, 'from esl_model.datasets import SAHeartDataSet\n'), ((462, 476), 'esl_model.datasets.VowelDataSet', 'VowelDataSet', ([], {}), '()\n', (474, 476), False, 'from esl_model.datasets import VowelDataSet\n'), ((594, 661), 'numpy.array_equal', 'np.array_equal', (['data.train_x[:1]', 'data._train_x.iloc[:1, :2].values'], {}), '(data.train_x[:1], data._train_x.iloc[:1, :2].values)\n', (608, 661), True, 'import numpy as np\n'), ((728, 795), 'numpy.array_equal', 'np.array_equal', (['data.train_x[:1]', 'data._train_x.iloc[:1, ft].values'], {}), '(data.train_x[:1], data._train_x.iloc[:1, ft].values)\n', (742, 795), True, 'import numpy as np\n'), ((978, 1080), 'esl_model.ch4.models.LinearRegressionIndicatorMatrix', 'LinearRegressionIndicatorMatrix', ([], {'train_x': 'train_x', 'train_y': 'train_y', 'n_class': 'vowel_data_y_dimension'}), '(train_x=train_x, train_y=train_y, n_class=\n vowel_data_y_dimension)\n', (1009, 1080), False, 'from esl_model.ch4.models import LinearRegressionIndicatorMatrix\n'), ((1470, 1544), 'esl_model.ch4.models.LDAModel', 'LDAModel', ([], {'train_x': 'train_x', 'train_y': 'train_y', 'n_class': 'vowel_data_y_dimension'}), '(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension)\n', (1478, 1544), False, 'from esl_model.ch4.models import LDAModel\n'), ((1940, 2014), 'esl_model.ch4.models.QDAModel', 'QDAModel', ([], {'train_x': 'train_x', 'train_y': 'train_y', 'n_class': 'vowel_data_y_dimension'}), '(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension)\n', (1948, 2014), False, 'from esl_model.ch4.models import QDAModel\n'), ((2515, 2609), 'esl_model.ch4.models.RDAModel', 'RDAModel', ([], {'train_x': 'train_x', 'train_y': 'train_y', 'n_class': 
'vowel_data_y_dimension', 'alpha': '(0.969697)'}), '(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension,\n alpha=0.969697)\n', (2523, 2609), False, 'from esl_model.ch4.models import RDAModel\n'), ((2956, 3044), 'esl_model.ch4.models.LDAForComputation', 'LDAForComputation', ([], {'train_x': 'train_x', 'train_y': 'train_y', 'n_class': 'vowel_data_y_dimension'}), '(train_x=train_x, train_y=train_y, n_class=\n vowel_data_y_dimension)\n', (2973, 3044), False, 'from esl_model.ch4.models import LDAForComputation\n'), ((3142, 3216), 'esl_model.ch4.models.LDAModel', 'LDAModel', ([], {'train_x': 'train_x', 'train_y': 'train_y', 'n_class': 'vowel_data_y_dimension'}), '(train_x=train_x, train_y=train_y, n_class=vowel_data_y_dimension)\n', (3150, 3216), False, 'from esl_model.ch4.models import LDAModel\n'), ((3298, 3342), 'numpy.isclose', 'np.isclose', (['model.error_rate', 'lda.error_rate'], {}), '(model.error_rate, lda.error_rate)\n', (3308, 3342), True, 'import numpy as np\n'), ((3601, 3696), 'esl_model.ch4.models.ReducedRankLDAModel', 'ReducedRankLDAModel', ([], {'train_x': 'train_x', 'train_y': 'train_y', 'n_class': 'vowel_data_y_dimension', 'L': '(2)'}), '(train_x=train_x, train_y=train_y, n_class=\n vowel_data_y_dimension, L=2)\n', (3620, 3696), False, 'from esl_model.ch4.models import ReducedRankLDAModel\n'), ((4204, 4248), 'esl_model.datasets.SAHeartDataSet', 'SAHeartDataSet', ([], {'select_features': '[1, 2, 4, 8]'}), '(select_features=[1, 2, 4, 8])\n', (4218, 4248), False, 'from esl_model.datasets import SAHeartDataSet\n'), ((4378, 4477), 'esl_model.ch4.models.BinaryLogisticRegression', 'BinaryLogisticRegression', ([], {'train_x': 'train_x', 'train_y': 'train_y', 'n_class': '(2)', 'do_standardization': '(False)'}), '(train_x=train_x, train_y=train_y, n_class=2,\n do_standardization=False)\n', (4402, 4477), False, 'from esl_model.ch4.models import BinaryLogisticRegression\n'), ((4694, 4780), 'numpy.array', 'np.array', (['[[-4.20427542], [0.08070059], 
[0.16758415], [0.92411669], [0.04404247]]'], {}), '([[-4.20427542], [0.08070059], [0.16758415], [0.92411669], [\n 0.04404247]])\n', (4702, 4780), True, 'import numpy as np\n'), ((4890, 4958), 'numpy.array', 'np.array', (['[0.498348, 0.02551477, 0.05418979, 0.22318295, 0.00974321]'], {}), '([0.498348, 0.02551477, 0.05418979, 0.22318295, 0.00974321])\n', (4898, 4958), True, 'import numpy as np\n'), ((4972, 5012), 'numpy.allclose', 'np.allclose', (['model.beta_hat', 'eq_beta_hat'], {}), '(model.beta_hat, eq_beta_hat)\n', (4983, 5012), True, 'import numpy as np\n'), ((5074, 5112), 'numpy.allclose', 'np.allclose', (['model.std_err', 'eq_std_err'], {}), '(model.std_err, eq_std_err)\n', (5085, 5112), True, 'import numpy as np\n'), ((5125, 5178), 'esl_model.datasets.SAHeartDataSet', 'SAHeartDataSet', ([], {'select_features': '[0, 1, 2, 4, 6, 7, 8]'}), '(select_features=[0, 1, 2, 4, 6, 7, 8])\n', (5139, 5178), False, 'from esl_model.datasets import SAHeartDataSet\n'), ((5245, 5344), 'esl_model.ch4.models.BinaryLogisticRegression', 'BinaryLogisticRegression', ([], {'train_x': 'train_x', 'train_y': 'train_y', 'n_class': '(2)', 'do_standardization': '(False)'}), '(train_x=train_x, train_y=train_y, n_class=2,\n do_standardization=False)\n', (5269, 5344), False, 'from esl_model.ch4.models import BinaryLogisticRegression\n')] |
import numpy as np
# Plane indices into a game-state array of shape input_shape
# (num_channels x input_size x input_size).
white = 0
black = 1
def other(color):
    # Opposite color: white <-> black (returns a bool, usable as an index).
    return not color
west = 2    # white's "connected to west edge" plane (see play_cell/winner)
east = 3    # white's "connected to east edge" plane
north = 4   # black's "connected to north edge" plane
south = 5   # black's "connected to south edge" plane
num_channels = 6
boardsize = 13
# The board is embedded in a padded array whose border cells are pre-owned
# by the appropriate player (see new_game), removing edge special cases.
padding = 2
input_size = boardsize+2*padding
# Relative offsets of the six neighbors of a cell on the hex grid.
neighbor_patterns = ((-1,0), (0,-1), (-1,1), (0,1), (1,0), (1,-1))
input_shape = (num_channels,input_size,input_size)
def cell(move):
    """Convert a move string such as 'a1' into padded-array (x, y) coordinates."""
    col = ord(move[0].lower()) - ord('a') + padding
    row = int(move[1:]) - 1 + padding
    return (col, row)
def move(cell):
    """Convert an (x, y) cell into a move string such as 'a1'.

    NOTE(review): unlike cell(), this does not subtract the board padding --
    confirm callers pass unpadded coordinates here.
    """
    letter = chr(ord('a') + cell[0])
    number = str(cell[1] + 1)
    return letter + number
def cell_m(cell):
    """Cell of the mirrored move: reflect the cell across the long diagonal."""
    x, y = cell
    return (y, x)
def neighbors(cell):
    """Return the hex neighbors of `cell` that lie inside the padded array."""
    x, y = cell
    result = []
    for dx, dy in neighbor_patterns:
        nx, ny = x + dx, y + dy
        if 0 <= nx < input_size and 0 <= ny < input_size:
            result.append((nx, ny))
    return result
def mirror_game(game):
    """Return the state mirrored across the long diagonal.

    Mirroring transposes every plane and swaps the two players' roles, so
    the color planes and the corresponding edge planes are exchanged.
    """
    mirrored = np.zeros(input_shape, dtype=bool)
    # destination plane <- source plane (then transposed)
    plane_swap = {white: black, black: white,
                  north: west, east: south,
                  south: east, west: north}
    for dst, src in plane_swap.items():
        mirrored[dst] = np.transpose(game[src])
    return mirrored
def flip_game(game):
    """Return the state rotated by 180 degrees.

    Colors keep their planes, but opposite edges of the same player swap.
    """
    flipped = np.zeros(input_shape, dtype=bool)
    # (destination plane, source plane) pairs under a 180-degree rotation.
    plane_pairs = ((white, white), (black, black),
                   (north, south), (south, north),
                   (east, west), (west, east))
    for dst, src in plane_pairs:
        flipped[dst] = np.rot90(game[src], 2)
    return flipped
def new_game(size = boardsize):
    """Return a fresh padded game state for an odd `size`-by-`size` board.

    The playing area is centered inside the padded array; the surrounding
    padding cells are pre-filled as stones/edge flags of the appropriate
    player, so connection logic needs no border special cases.

    Args:
        size (int): odd board dimension (defaults to the module boardsize).

    Raises:
        ValueError: if `size` is even.
    """
    if size % 2 == 0:
        raise ValueError("boardsize must be odd")
    # Bug fix: use floor division.  On Python 3, (input_size - size)/2 is a
    # float and raises TypeError when used as a slice index below.
    true_padding = (input_size - size) // 2
    game = np.zeros(input_shape, dtype=bool)
    # White pre-owns the border strips along the first axis, together with
    # the matching west/east edge flags...
    game[white, 0:true_padding, :] = 1
    game[white, input_size-true_padding:, :] = 1
    game[west, 0:true_padding, :] = 1
    game[east, input_size-true_padding:, :] = 1
    # ...black pre-owns the border strips along the second axis, with the
    # matching north/south edge flags.
    game[black, :, 0:true_padding] = 1
    game[black, :, input_size-true_padding:] = 1
    game[north, :, 0:true_padding] = 1
    game[south, :, input_size-true_padding:] = 1
    return game
def winner(game):
    """Return the color whose two edge flags overlap (checked at one cell),
    or None if the game is undecided."""
    if game[east, 0, 0] and game[west, 0, 0]:
        return white
    if game[north, 0, 0] and game[south, 0, 0]:
        return black
    return None
def flood_fill(game, cell, color, edge):
    """Set the `edge` flag on every `color` stone reachable from `cell`.

    Rewritten iteratively with an explicit stack: the recursive original
    could hit Python's recursion limit on large connected groups.  The
    resulting state is identical.
    """
    game[edge, cell[0], cell[1]] = 1
    stack = [cell]
    while stack:
        current = stack.pop()
        for n in neighbors(current):
            # Spread only through same-colored stones not yet flagged.
            if game[color, n[0], n[1]] and not game[edge, n[0], n[1]]:
                game[edge, n[0], n[1]] = 1
                stack.append(n)
def play_cell(game, cell, color):
    """Place a stone of `color` at `cell` and propagate edge connectivity.

    If the new stone touches a same-colored neighbor already flagged as
    connected to one of the player's edges, that edge flag is flood-filled
    through the newly joined group.
    """
    game[color, cell[0], cell[1]] = 1
    # White connects east-west, black connects north-south.
    edge1, edge2 = (east, west) if color == white else (north, south)
    touches_edge1 = any(game[edge1, n[0], n[1]] and game[color, n[0], n[1]]
                        for n in neighbors(cell))
    touches_edge2 = any(game[edge2, n[0], n[1]] and game[color, n[0], n[1]]
                        for n in neighbors(cell))
    if touches_edge1:
        flood_fill(game, cell, color, edge1)
    if touches_edge2:
        flood_fill(game, cell, color, edge2)
def state_string(state):
    """
    Return an ascii representation of the board state.

    Stones are drawn as 'O' (white) and '@' (black) on a sheared grid that
    suggests the hex layout; a stone is wrapped in an ANSI color escape when
    it carries edge-connection flags (red: first edge, green: second edge,
    yellow: both).  A cell marked in both color planes is drawn as '#'.
    """
    w = 'O'
    b = '@'
    empty = '.'
    # ANSI escape sequences used to highlight edge-connected stones.
    end_color = '\033[0m'
    edge1_color = '\033[31m'
    edge2_color = '\033[32m'
    both_color = '\033[33m'
    invalid = '#'
    ret = '\n'
    coord_size = len(str(boardsize))
    offset = 1
    ret+=' '*(offset+2)
    # Column header: letters only over the playable columns, blanks over padding.
    for x in range(input_size):
        if(x<padding or x>=boardsize+padding):
            ret+=' '*(offset*2+1)
        else:
            ret+=chr(ord('A')+(x-padding))+' '*offset*2
    ret+='\n'
    for y in range(input_size):
        # Row label (1-based), padded to a fixed width; blank for padding rows.
        if(y<padding or y>=boardsize+padding):
            ret+=' '*(offset*2+coord_size)
        else:
            ret+=str(y+1-padding)+' '*(offset*2+coord_size-len(str(y+1-padding)))
        for x in range(input_size):
            if(state[white, x, y] == 1):
                # Pick a highlight color from the white edge flags.
                if(state[west, x, y] == 1 and state[east, x, y]):
                    ret+=both_color
                elif(state[west, x,y]):
                    ret+=edge1_color
                elif(state[east, x, y]):
                    ret+=edge2_color
                # A cell set in both color planes is inconsistent -> '#'.
                if(state[black, x, y] == 1):
                    ret+=invalid
                else:
                    ret+=w
                ret+=end_color
            elif(state[black, x, y] == 1):
                # Pick a highlight color from the black edge flags.
                if(state[north, x, y] == 1 and state[south, x, y]):
                    ret+=both_color
                elif(state[north, x,y]):
                    ret+=edge1_color
                elif(state[south, x, y]):
                    ret+=edge2_color
                ret+=b
                ret+=end_color
            else:
                ret+=empty
            ret+=' '*offset*2
        # Shift every following row right by one more offset to shear the grid.
        ret+="\n"+' '*offset*(y+1)
    ret+=' '*(offset*2+1)+(' '*offset*2)*input_size
    return ret
"numpy.transpose",
"numpy.rot90",
"numpy.zeros"
] | [((817, 850), 'numpy.zeros', 'np.zeros', (['input_shape'], {'dtype': 'bool'}), '(input_shape, dtype=bool)\n', (825, 850), True, 'import numpy as np\n'), ((866, 891), 'numpy.transpose', 'np.transpose', (['game[black]'], {}), '(game[black])\n', (878, 891), True, 'import numpy as np\n'), ((907, 932), 'numpy.transpose', 'np.transpose', (['game[white]'], {}), '(game[white])\n', (919, 932), True, 'import numpy as np\n'), ((948, 972), 'numpy.transpose', 'np.transpose', (['game[west]'], {}), '(game[west])\n', (960, 972), True, 'import numpy as np\n'), ((988, 1013), 'numpy.transpose', 'np.transpose', (['game[south]'], {}), '(game[south])\n', (1000, 1013), True, 'import numpy as np\n'), ((1029, 1053), 'numpy.transpose', 'np.transpose', (['game[east]'], {}), '(game[east])\n', (1041, 1053), True, 'import numpy as np\n'), ((1069, 1094), 'numpy.transpose', 'np.transpose', (['game[north]'], {}), '(game[north])\n', (1081, 1094), True, 'import numpy as np\n'), ((1142, 1175), 'numpy.zeros', 'np.zeros', (['input_shape'], {'dtype': 'bool'}), '(input_shape, dtype=bool)\n', (1150, 1175), True, 'import numpy as np\n'), ((1193, 1217), 'numpy.rot90', 'np.rot90', (['game[white]', '(2)'], {}), '(game[white], 2)\n', (1201, 1217), True, 'import numpy as np\n'), ((1234, 1258), 'numpy.rot90', 'np.rot90', (['game[black]', '(2)'], {}), '(game[black], 2)\n', (1242, 1258), True, 'import numpy as np\n'), ((1275, 1299), 'numpy.rot90', 'np.rot90', (['game[south]', '(2)'], {}), '(game[south], 2)\n', (1283, 1299), True, 'import numpy as np\n'), ((1316, 1339), 'numpy.rot90', 'np.rot90', (['game[west]', '(2)'], {}), '(game[west], 2)\n', (1324, 1339), True, 'import numpy as np\n'), ((1356, 1380), 'numpy.rot90', 'np.rot90', (['game[north]', '(2)'], {}), '(game[north], 2)\n', (1364, 1380), True, 'import numpy as np\n'), ((1397, 1420), 'numpy.rot90', 'np.rot90', (['game[east]', '(2)'], {}), '(game[east], 2)\n', (1405, 1420), True, 'import numpy as np\n'), ((1575, 1608), 'numpy.zeros', 'np.zeros', 
(['input_shape'], {'dtype': 'bool'}), '(input_shape, dtype=bool)\n', (1583, 1608), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
This module contains all the relevant classes that form the second layer
between the SELMA GUI and the data objects. It contains the following classes:
+ :class: `SDMSignals`
+ :class: `SelmaDataModel`
"""
# ====================================================================
import os
import numpy as np
from PyQt5 import (QtCore)
import threading
# ====================================================================
import SELMAData
import SELMADataIO
import SELMABatchAnalysis
# ====================================================================
class SDMSignals(QtCore.QObject):
    """
    This class inherits from a QObject in order to store and connect
    pyqtSignals.
    """
    # Display updates: main image pixmap and overlay masks sent to the GUI.
    setPixmapSignal = QtCore.pyqtSignal(np.ndarray)
    sendVesselMaskSignal = QtCore.pyqtSignal(np.ndarray)
    sendMaskSignal = QtCore.pyqtSignal(np.ndarray)
    # Progress reporting: bar percentage and status label text.
    setProgressBarSignal = QtCore.pyqtSignal(int)
    setProgressLabelSignal = QtCore.pyqtSignal(str)
    # Frame navigation: (current frame, total number of frames).
    setFrameCountSignal = QtCore.pyqtSignal(int, int)
    # Pixel readout -- presumably (x, y, value); confirm against the GUI slot.
    pixelValueSignal = QtCore.pyqtSignal(int, int, float)
    # User-facing message dialogs.
    errorMessageSignal = QtCore.pyqtSignal(str)
    infoMessageSignal = QtCore.pyqtSignal(str)
    # Image analysis variables passed to the GUI as a dict.
    sendImVarSignal = QtCore.pyqtSignal(dict)
sendImVarSignal = QtCore.pyqtSignal(dict)
class SelmaDataModel:
    """
    This class is the hub that manages all the data that is used in the
    program. It contains all the slots for signals sent from the GUI that
    interact with the data in some way.
    The class contains an instance of a SELMAData object, as well as all
    variables needed to perform the vessel analysis. These are read from the
    user settings.
    The class furthermore handles sending data to the GUI as well as calling
    IO methods from SELMADataIO.
    """
    def __init__(self, selmaDataObject = None):
        # Currently loaded SELMADataObject (None until a DICOM is opened).
        self._SDO = selmaDataObject
        # self._medianDiameter = medianDiameter
        # 1-based index of the frame currently shown in the GUI.
        self._frameCount = 1
        self._frameMax = 0
        # When True, the T1 image is displayed instead of the PCA frames.
        self._displayT1 = False;
        # Signal container used to communicate with the GUI.
        self.signalObject = SDMSignals()
    '''Public'''
    #Slots
    # ------------------------------------------------------------------
    def newFrameSlot(self, direction):
        """
        Triggered when an unmodified wheelEvent happens in
        SELMAGUI.
        Cycles through the stored frames based on the direction of the
        mouseWheel. Sends signals to set the frameLabel and the pixmap.
        Args:
            direction (int): +1 or -1, the direction of the scrollEvent.
        Returns:
            Sends signals with the following:
                frame (numpy.ndarray): the frame at the new frameCount.
                frameCount (int): the new frameCount.
                frameMax (int): the total number of Frames.
        """
        if self._SDO is None:
            return
        if not self._displayT1:
            self._frameCount += direction
            # Wrap around in both directions (frameCount is 1-based).
            if self._frameCount <= 0:
                self._frameCount = self._frameMax
            if self._frameCount > self._frameMax:
                self._frameCount = 1
        self._displayFrame()
    def loadMaskSlot(self, fname):
        """
        Calls the loadMask function from SELMADataIO and sends the loaded
        mask back to the GUI.
        Args:
            fname (str): path to the mask.
        Returns:
            Signal with the following:
                mask (numpy.ndarray): the mask which was referred to.
        """
        if fname is None or self._SDO is None:
            return
        mask = SELMADataIO.loadMask(fname)
        # loadMask returns None for unsupported (v7.3) .mat files.
        if mask is None:
            self.signalObject.errorMessageSignal.emit(
                "This version of .mat file is not supported. Please " +
                "save it as a non-v7.3 file and try again.")
            return
        #Ensure that the mask has the same dimensions as the Frames
        frames = self._SDO.getFrames()
        frame = frames[self._frameCount - 1]
        maskShape = mask.shape
        frameShape = frame.shape
        if maskShape != frameShape:
            errStr = "The dimensions of the frame and the mask do not align."
            self.signalObject.errorMessageSignal.emit(errStr +
                                                      str(frameShape) +
                                                      str(maskShape))
        else:
            self._SDO.setMask(mask)
            self.signalObject.sendMaskSignal.emit(mask)
    def saveMaskSlot(self, fname):
        """
        Gets the mask (if any) from the SDO and calls the saveMask function in
        SELMADataIO.
        Args:
            fname (str): path to where the mask needs to be saved.
        Returns:
        """
        mask = self._SDO.getMask()
        SELMADataIO.saveMask(fname, mask)
    def segmentMaskSlot(self):
        """Segments a mask from the loaded T1 image and sends it to the GUI.

        Requires both a PCA and a T1 dicom to be loaded; emits an error
        message otherwise.
        """
        if self._SDO is None:
            self.signalObject.errorMessageSignal.emit(
                "Please load a PCA dicom first.")
            return
        if self._SDO.getT1() is None:
            self.signalObject.errorMessageSignal.emit(
                "Please load a T1 dicom first.")
            return
        self._SDO.segmentMask()
        mask = self._SDO.getMask()
        print(mask.shape, np.unique(mask))
        self.signalObject.sendMaskSignal.emit(mask)
    def thresholdMaskSlot(self):
        """Gets a new copy of the (thresholded) mask from the SDO and
        returns it to the GUI"""
        if self._SDO is None:
            return
        mask = self._SDO.getMask()
        if mask is not None:
            self.signalObject.sendMaskSignal.emit(mask)
    def loadDCMSlot(self, fname):
        """
        Loads a new DCM into the SDO. Triggered when the openAct is called.
        Args:
            fname (str): path to the Dicom file.
        Returns:
            Sends signals with the following:
                frame (numpy.ndarray): the frame at the current frameCount.
                frameCount (int): the current frameCount.
                frameMax (int): the total number of Frames.
        """
        if fname is None:
            return
        self._SDO = SELMAData.SELMADataObject(self.signalObject,
                                              dcmFilename= fname)
        # Reset the frame cursor for the freshly loaded series.
        self._frameCount = 1
        self._frameMax = self._SDO.getNumFrames()
        self._displayFrame()
    def loadClassicDCMSlot(self, fnames):
        """
        Loads a new classic DCM into the SDO. Triggered when the
        openClassicAct is called.
        Args:
            fnames(tuple(str)): list of filenames
        """
        if fnames is None:
            return
        self._SDO = SELMAData.SELMADataObject(self.signalObject,
                                              dcmFilename=fnames,
                                              classic = True)
        self._frameCount = 1
        self._frameMax = self._SDO.getNumFrames()
        self._displayFrame()
    def loadT1DCMSlot(self, fname):
        """
        Loads a new T1 DCM into the program. Triggered when the
        openT1Act is called.
        Args:
            fname (str): path to the Dicom file.
        """
        if fname is None:
            return
        if self._SDO is None:
            self.signalObject.errorMessageSignal.emit(
                "Please load a PCA dicom first.")
            return
        self._SDO.setT1(fname)
        self._t1FrameCount = 1
        self._t1FrameMax = self._SDO.getT1().getNumFrames()
        # Switch the display over to the newly loaded T1 image.
        self._displayT1 = True
        self._displayFrame()
    def applyMaskSlot(self, mask):
        """
        Sets the drawn mask into the data object.
        Args:
            mask (numpy.ndarray): mask from the GUI.
        """
        self._SDO.setMask(mask)
    def analyseVesselSlot(self):
        """
        Slot for analyseVesselSignal. Tells the SDO to analyse the vessels
        in its current dataset.
        """
        if self._SDO is None:
            self.signalObject.errorMessageSignal.emit("No DICOM loaded.")
            return
        # Run the analysis on a daemon thread so the GUI stays responsive.
        self.analysisThread = threading.Thread(
                target= self._SDO.analyseVessels,
                daemon = True)
        self.analysisThread.start()
    def analyseBatchSlot(self, dirName):
        """Slot for the analyse batch signal.
        Goes through the specified directory and finds all .dcm files which
        do not have 'mask' in the name. The program then iterates over these
        files:
            A SelmaDataObject is created with the .dcm file.
            The directory is then searched for a mask file which has the same
            name as the .dcm (along with 'mask' somewhere in the name).
            This can be any suitable mask type, or another Dicom file, in which
            case a segmentation is made.
            Then the vesselAnalysis function is called and the results are
            written to a .txt file with the same name.
        Args:
            dirname(str): path to the directory containing all input files.
        """
        #TODO: add progress feedback
        self.signalObject.infoMessageSignal.emit(
            "GUI may become unresponsive while executing batch analysis. "+
            "Please do not close GUI until batch analysis is complete "+
            "or an error has occured. Press OK to continue.")
        files = os.listdir(dirName)
        # A directory without subfolders is treated as an "enhanced" batch,
        # one with subfolders as a "classic" batch.
        if not any(os.path.isdir(dirName + '/' + subfolder)
                   for subfolder in files):
            SELMABatchAnalysis.EnhancedBatchAnalysis(dirName, files, self)
        elif any(os.path.isdir(dirName + '/' + subfolder)
                 for subfolder in files):
            SELMABatchAnalysis.ClassicBatchAnalysis(dirName, files, self)
    def switchViewSlot(self):
        # Toggle between the PCA frames and the T1 image (if one is loaded).
        if self._SDO is None:
            return
        if self._SDO.getT1() is None:
            return
        self._displayT1 = not self._displayT1
        self._displayFrame()
    def saveVesselStatisticsSlot(self, fname):
        """
        Slot for saveVesselStatisticsSignal. Saves the statistics of the
        significant vessels to the filename.
        Args:
            fname (str): path to where the result of the analysis should be
            written.
        """
        vesselDict = self._SDO.getVesselDict()
        SELMADataIO.writeVesselDict(vesselDict, fname)
    def pixelValueSlot(self, x,y):
        """
        Slot for mouseMoveEvent in the GUI. Sends back the cursor location
        as well as the value of the current frame under that location.
        Args:
            x (int): x-index of the frame
            y (int): y-index of the frame
        Returns:
            Sends the following via a signal:
                x (int): x-index of the frame
                y (int): y-index of the frame
                pixelValue (float): value of the current frame at [x,y]
        """
        if self._SDO is None:
            return
        frames = self._SDO.getFrames()
        frame = frames[self._frameCount - 1]
        # Note: frames are indexed row-first, hence [y, x].
        pixelValue = frame[y,x]
        self.signalObject.pixelValueSignal.emit(x, y, pixelValue)
    def getVarSlot(self):
        # Collect the image variables and send them to the ImVar window.
        if self._SDO is None:
            return
        variables = dict()
        venc = self._SDO.getVenc()
        variables['venc'] = venc
        velRescale = self._SDO.getRescale()
        variables['velscale'] = velRescale
        #Return the variables
        self.signalObject.sendImVarSignal.emit(variables)
    def setVarSlot(self, variables):
        """Sets the user-defined variables stored in the ImVar window"""
        if self._SDO is None:
            self.signalObject.errorMessageSignal.emit("No DICOM loaded.")
            return
        for variable in variables:
            if variable == "venc":
                self._SDO.setVenc(variables['venc'])
            if variable == "velscale":
                self._SDO.setVelRescale(variables["velscale"])
        #other variables
    #Getter functions
    # ------------------------------------------------------------------
    def getSDO(self):
        # Returns the currently loaded SELMADataObject (may be None).
        return self._SDO
    # def getMedianDiameter(self):
    #     return self._medianDiameter
    #Setter functions
    # -----------------------------------------------------------------
    # def setSDO(self, selmaDataObject):
    #     self._SDO = selmaDataObject
    # def setMedianDiameter(self, diam):
    #     self._medianDiameter = diam
    '''Private'''
    def _displayFrame(self):
        # Push the currently selected frame (PCA or T1) to the GUI.
        if self._displayT1:
            frame = self._SDO.getT1().getFrames()
            # frame = frames[self._t1FrameCount - 1]
            # T1 display always reports frame 1 of 1.
            self.signalObject.setFrameCountSignal.emit(1, 1)
        else:
            frames = self._SDO.getFrames()
            frame = frames[self._frameCount - 1]
            self.signalObject.setFrameCountSignal.emit(self._frameCount,
                                                       self._frameMax)
        self.signalObject.setPixmapSignal.emit(frame)
| [
"PyQt5.QtCore.pyqtSignal",
"threading.Thread",
"SELMABatchAnalysis.ClassicBatchAnalysis",
"os.path.isdir",
"SELMADataIO.saveMask",
"SELMAData.SELMADataObject",
"SELMADataIO.loadMask",
"SELMABatchAnalysis.EnhancedBatchAnalysis",
"SELMADataIO.writeVesselDict",
"os.listdir",
"numpy.unique"
] | [((769, 798), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['np.ndarray'], {}), '(np.ndarray)\n', (786, 798), False, 'from PyQt5 import QtCore\n'), ((830, 859), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['np.ndarray'], {}), '(np.ndarray)\n', (847, 859), False, 'from PyQt5 import QtCore\n'), ((891, 920), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['np.ndarray'], {}), '(np.ndarray)\n', (908, 920), False, 'from PyQt5 import QtCore\n'), ((952, 974), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int'], {}), '(int)\n', (969, 974), False, 'from PyQt5 import QtCore\n'), ((1006, 1028), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['str'], {}), '(str)\n', (1023, 1028), False, 'from PyQt5 import QtCore\n'), ((1060, 1087), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int', 'int'], {}), '(int, int)\n', (1077, 1087), False, 'from PyQt5 import QtCore\n'), ((1119, 1153), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['int', 'int', 'float'], {}), '(int, int, float)\n', (1136, 1153), False, 'from PyQt5 import QtCore\n'), ((1184, 1206), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['str'], {}), '(str)\n', (1201, 1206), False, 'from PyQt5 import QtCore\n'), ((1237, 1259), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['str'], {}), '(str)\n', (1254, 1259), False, 'from PyQt5 import QtCore\n'), ((1295, 1318), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['dict'], {}), '(dict)\n', (1312, 1318), False, 'from PyQt5 import QtCore\n'), ((3814, 3841), 'SELMADataIO.loadMask', 'SELMADataIO.loadMask', (['fname'], {}), '(fname)\n', (3834, 3841), False, 'import SELMADataIO\n'), ((5115, 5148), 'SELMADataIO.saveMask', 'SELMADataIO.saveMask', (['fname', 'mask'], {}), '(fname, mask)\n', (5135, 5148), False, 'import SELMADataIO\n'), ((6685, 6748), 'SELMAData.SELMADataObject', 'SELMAData.SELMADataObject', (['self.signalObject'], {'dcmFilename': 'fname'}), '(self.signalObject, dcmFilename=fname)\n', (6710, 6748), False, 'import SELMAData\n'), ((7271, 
7349), 'SELMAData.SELMADataObject', 'SELMAData.SELMADataObject', (['self.signalObject'], {'dcmFilename': 'fnames', 'classic': '(True)'}), '(self.signalObject, dcmFilename=fnames, classic=True)\n', (7296, 7349), False, 'import SELMAData\n'), ((8825, 8887), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._SDO.analyseVessels', 'daemon': '(True)'}), '(target=self._SDO.analyseVessels, daemon=True)\n', (8841, 8887), False, 'import threading\n'), ((10176, 10195), 'os.listdir', 'os.listdir', (['dirName'], {}), '(dirName)\n', (10186, 10195), False, 'import os\n'), ((11253, 11299), 'SELMADataIO.writeVesselDict', 'SELMADataIO.writeVesselDict', (['vesselDict', 'fname'], {}), '(vesselDict, fname)\n', (11280, 11299), False, 'import SELMADataIO\n'), ((5670, 5685), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (5679, 5685), True, 'import numpy as np\n'), ((10328, 10390), 'SELMABatchAnalysis.EnhancedBatchAnalysis', 'SELMABatchAnalysis.EnhancedBatchAnalysis', (['dirName', 'files', 'self'], {}), '(dirName, files, self)\n', (10368, 10390), False, 'import SELMABatchAnalysis\n'), ((10530, 10591), 'SELMABatchAnalysis.ClassicBatchAnalysis', 'SELMABatchAnalysis.ClassicBatchAnalysis', (['dirName', 'files', 'self'], {}), '(dirName, files, self)\n', (10569, 10591), False, 'import SELMABatchAnalysis\n'), ((10224, 10264), 'os.path.isdir', 'os.path.isdir', (["(dirName + '/' + subfolder)"], {}), "(dirName + '/' + subfolder)\n", (10237, 10264), False, 'import os\n'), ((10421, 10461), 'os.path.isdir', 'os.path.isdir', (["(dirName + '/' + subfolder)"], {}), "(dirName + '/' + subfolder)\n", (10434, 10461), False, 'import os\n')] |
"""test masks."""
import os
import mercantile
import numpy
import pytest
import rasterio
from rasterio.coords import BoundingBox
from rasterio.crs import CRS
from rio_tiler import reader
# Tiles used by the tests; "masked" lies inside the dataset footprint,
# "boundless" partially outside it.
tiles = {
    "masked": mercantile.Tile(x=535, y=498, z=10),
    "boundless": mercantile.Tile(x=540, y=497, z=10),
}
# Synthetic dataset footprint on the equator (EPSG:32632).
equator = {
    "name": "equator",
    "bounds": BoundingBox(left=382792.5, bottom=362992.5, right=610507.5, top=595207.5),
    "crs": CRS.from_epsg(32632),
}
# dtype / nodata-handling combinations used to parametrize test_mask.
# Commented-out entries are known-failing combinations.
dataset = [
    dict(equator, dtype="uint8", nodata_type="alpha"),
    dict(equator, dtype="uint8", nodata_type="nodata"),
    dict(equator, dtype="uint8", nodata_type="mask"),
    dict(equator, dtype="int8", nodata_type="alpha"),
    dict(equator, dtype="int8", nodata_type="nodata"),
    dict(equator, dtype="int8", nodata_type="mask"),
    # dict(equator, dtype="uint16", nodata_type="alpha"), #fail
    dict(equator, dtype="uint16", nodata_type="nodata"),
    dict(equator, dtype="uint16", nodata_type="mask"),
    # dict(equator, dtype="int16", nodata_type="alpha"), # Fail
    dict(equator, dtype="int16", nodata_type="nodata"),
    # dict(equator, dtype="int16", nodata_type="mask"), # Fail
]
# Directory in which the fixture COGs are generated.
cog_path = os.path.join(os.path.dirname(__file__), "fixtures", "mask")
def test_mask_bilinear(cloudoptimized_geotiff):
    """Test mask read with bilinear resampling"""
    src_path = cloudoptimized_geotiff(
        cog_path, **equator, dtype="uint8", nodata_type="mask"
    )
    with rasterio.open(src_path) as src_dst:
        # With force_binary_mask the returned mask must match the
        # non-zero footprint of the data exactly.
        arr, msk = reader.tile(
            src_dst,
            535,
            498,
            10,
            tilesize=256,
            resampling_method="bilinear",
            force_binary_mask=True,
        )
        expected = numpy.where(arr[0] != 0, 255, 0).astype(numpy.uint8)
        numpy.testing.assert_array_equal(msk, expected)
        # Without binarization, bilinear resampling produces intermediate
        # mask values, so the simple non-zero footprint no longer matches.
        arr, msk = reader.tile(
            src_dst,
            535,
            498,
            10,
            tilesize=256,
            resampling_method="bilinear",
            force_binary_mask=False,
        )
        expected = numpy.where(arr[0] != 0, 255, 0).astype(numpy.uint8)
        assert not numpy.array_equal(msk, expected)
@pytest.mark.parametrize("resampling", ["bilinear", "nearest"])
@pytest.mark.parametrize("tile_name", ["masked"])
@pytest.mark.parametrize("dataset_info", dataset)
def test_mask(dataset_info, tile_name, resampling, cloudoptimized_geotiff):
    """Test tile read for multiple combination of datatype/mask/tile extent."""
    src_path = cloudoptimized_geotiff(cog_path, **dataset_info)
    xyz = tiles[tile_name]
    with rasterio.open(src_path) as src_dst:
        arr, msk = reader.tile(
            src_dst,
            xyz.x,
            xyz.y,
            xyz.z,
            tilesize=256,
            resampling_method=resampling,
            force_binary_mask=True,
        )
        # The binary mask must be exactly the non-zero footprint of band 0.
        expected = numpy.where(arr[0] != 0, 255, 0).astype(numpy.uint8)
        numpy.testing.assert_array_equal(msk, expected)
| [
"rasterio.open",
"mercantile.Tile",
"rasterio.coords.BoundingBox",
"numpy.testing.assert_array_equal",
"os.path.dirname",
"rio_tiler.reader.tile",
"numpy.array_equal",
"pytest.mark.parametrize",
"rasterio.crs.CRS.from_epsg"
] | [((2174, 2236), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""resampling"""', "['bilinear', 'nearest']"], {}), "('resampling', ['bilinear', 'nearest'])\n", (2197, 2236), False, 'import pytest\n'), ((2238, 2286), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tile_name"""', "['masked']"], {}), "('tile_name', ['masked'])\n", (2261, 2286), False, 'import pytest\n'), ((2288, 2336), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset_info"""', 'dataset'], {}), "('dataset_info', dataset)\n", (2311, 2336), False, 'import pytest\n'), ((215, 250), 'mercantile.Tile', 'mercantile.Tile', ([], {'x': '(535)', 'y': '(498)', 'z': '(10)'}), '(x=535, y=498, z=10)\n', (230, 250), False, 'import mercantile\n'), ((269, 304), 'mercantile.Tile', 'mercantile.Tile', ([], {'x': '(540)', 'y': '(497)', 'z': '(10)'}), '(x=540, y=497, z=10)\n', (284, 304), False, 'import mercantile\n'), ((357, 430), 'rasterio.coords.BoundingBox', 'BoundingBox', ([], {'left': '(382792.5)', 'bottom': '(362992.5)', 'right': '(610507.5)', 'top': '(595207.5)'}), '(left=382792.5, bottom=362992.5, right=610507.5, top=595207.5)\n', (368, 430), False, 'from rasterio.coords import BoundingBox\n'), ((443, 463), 'rasterio.crs.CRS.from_epsg', 'CRS.from_epsg', (['(32632)'], {}), '(32632)\n', (456, 463), False, 'from rasterio.crs import CRS\n'), ((1193, 1218), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1208, 1218), False, 'import os\n'), ((1457, 1480), 'rasterio.open', 'rasterio.open', (['src_path'], {}), '(src_path)\n', (1470, 1480), False, 'import rasterio\n'), ((1514, 1621), 'rio_tiler.reader.tile', 'reader.tile', (['src_dst', '(535)', '(498)', '(10)'], {'tilesize': '(256)', 'resampling_method': '"""bilinear"""', 'force_binary_mask': '(True)'}), "(src_dst, 535, 498, 10, tilesize=256, resampling_method=\n 'bilinear', force_binary_mask=True)\n", (1525, 1621), False, 'from rio_tiler import reader\n'), ((1782, 1832), 
'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['mask', 'masknodata'], {}), '(mask, masknodata)\n', (1814, 1832), False, 'import numpy\n'), ((1855, 1963), 'rio_tiler.reader.tile', 'reader.tile', (['src_dst', '(535)', '(498)', '(10)'], {'tilesize': '(256)', 'resampling_method': '"""bilinear"""', 'force_binary_mask': '(False)'}), "(src_dst, 535, 498, 10, tilesize=256, resampling_method=\n 'bilinear', force_binary_mask=False)\n", (1866, 1963), False, 'from rio_tiler import reader\n'), ((2595, 2618), 'rasterio.open', 'rasterio.open', (['src_path'], {}), '(src_path)\n', (2608, 2618), False, 'import rasterio\n'), ((2652, 2768), 'rio_tiler.reader.tile', 'reader.tile', (['src_dst', 'tile.x', 'tile.y', 'tile.z'], {'tilesize': '(256)', 'resampling_method': 'resampling', 'force_binary_mask': '(True)'}), '(src_dst, tile.x, tile.y, tile.z, tilesize=256,\n resampling_method=resampling, force_binary_mask=True)\n', (2663, 2768), False, 'from rio_tiler import reader\n'), ((2930, 2980), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['mask', 'masknodata'], {}), '(mask, masknodata)\n', (2962, 2980), False, 'import numpy\n'), ((2135, 2170), 'numpy.array_equal', 'numpy.array_equal', (['mask', 'masknodata'], {}), '(mask, masknodata)\n', (2152, 2170), False, 'import numpy\n')] |
import numpy as np
import statsmodels.api as sm
import pandas as pd
from ..mixins import Preprocessor, AlwaysPredictPlotter, AdvantageEstimator
def _year_to_decade(yr):
"""
A simple function so I don't mess this up later, this constructs the *redistricting*
decade of a district. This is offset from the regular decade a year is in by two.
"""
return (yr - 2) - (yr - 2) % 10
class Panel(Preprocessor, AlwaysPredictPlotter):
    """Panel model of district vote shares.

    Fits one GLS model per redistricting decade on the configured
    covariates plus the group-level mean vote share ('grouped_vs'),
    and simulates election outcomes from the fitted models.
    """
    def __init__(self, frame,
                 share_column='vote_share',
                 group_by='state',
                 covariate_columns=None,
                 weight_column=None,
                 year_column='year',
                 redistrict_column=None,
                 district_id='district_id',
                 missing='drop',
                 uncontested=None,
                 break_on_GIGO=True):
        # Preprocessor builds self.long / self.wide from the raw frame.
        super().__init__(frame, share_column=share_column,
                         covariates=covariate_columns,
                         weight_column=weight_column,
                         year_column=year_column,
                         redistrict_column=redistrict_column,
                         district_id=district_id,
                         missing=missing,
                         uncontested=uncontested,
                         break_on_GIGO=break_on_GIGO)
        self._years = np.sort(self.long.year.unique())
        # The group-mean vote share is added as an extra covariate.
        self._covariate_cols += ['grouped_vs']
        self._decade_starts = np.sort(
            list(
                set([_year_to_decade(yr)
                     for yr in self.years])))
        self.decades = {dec: [] for dec in self._decade_starts}
        for i, (yr, wide) in enumerate(zip(self.years, self.wide)):
            if group_by is not None:
                # Mean vote share per group (e.g. state); missing groups
                # default to .5.
                grouped_vs = wide.groupby(
                    group_by).vote_share.mean().to_frame()
                grouped_vs.columns = ['grouped_vs']
                grouped_vs = grouped_vs.fillna(.5)
                self.wide[i] = wide.merge(
                    grouped_vs, left_on=group_by, right_index=True)
                self.wide[i]['grouped_vs'] = self.wide[i]['grouped_vs'].fillna(
                    .5)
            else:
                # No grouping: use the overall mean vote share for the year.
                grouped_vs = wide.vote_share.mean()
                self.wide[i]['grouped_vs'] = grouped_vs
            self.decades[_year_to_decade(yr)].append(self.wide[i])
        self.models = []
        # One (possibly weighted) GLS fit per redistricting decade.
        for yr in self._decade_starts:
            self.decades[yr] = pd.concat(self.decades[yr], axis=0, sort=True)
            # WLS Yields incredibly precise simulation values? Not sure why.
            X = sm.add_constant(self.decades[yr][self._covariate_cols]).values
            Y = self.decades[yr].vote_share.values
            # Impute missing vote shares with the group-mean vote share.
            Y[np.isnan(Y)] = self.decades[yr]['grouped_vs'].values[np.isnan(Y)]
            if weight_column is None:
                weights = None
                self.models.append(sm.GLS(Y, X).fit())
            else:
                weights = self.decades[yr].weight
                self.models.append(sm.GLS(Y, X, sigma=weights).fit())
    @property
    def years(self):
        # Sorted unique election years present in the data.
        return self._years
    @property
    def params(self):
        """
        All of the parameters across all models
        """
        unite = pd.concat([model.params for model in self.models], axis=1)
        unite.columns = self.years
        return unite
    def simulate_elections(self, n_sims=1000, t=-1, year=None,
                           target_v=None, swing=0., fix=False, predict=True):
        # Simulate n_sims draws of district vote shares for one election
        # year, from the GLS model of that year's redistricting decade.
        #   target_v: shift the mean so the weighted average vote equals it.
        #   swing:    uniform shift added to every district's expectation.
        #   fix:      center the simulations on zero mean across draws.
        # NOTE(review): `predict` is accepted but never used in this body.
        if year is None:
            year = list(self.years)[t]
        else:
            t = list(self.years).index(year)
        decade = _year_to_decade(year)
        decade_t = list(self._decade_starts).index(decade)
        model = self.models[decade_t]
        # Rows of the decade frame belonging to the requested year.
        mask = (self.decades[decade].year == year)
        X = np.asarray(self.wide[t][self._covariate_cols])
        expectation = model.predict(sm.add_constant(X, has_constant='add'))
        if target_v is not None:
            exp_pvs = np.average(expectation, weights=self.wide[t].weight)
            diff = (target_v - exp_pvs)
            expectation += diff
        if swing is not None:
            expectation += swing
        # grab the square of the cov relating to the simulations and cast to std. dev.
        sigma = model.model.sigma[mask]**.5
        sigma *= model.scale ** .5
        sims = np.random.normal(expectation, sigma, size=(n_sims, X.shape[0]))
        if fix:
            sims -= sims.mean(axis=0)
        return sims
| [
"numpy.average",
"numpy.asarray",
"numpy.isnan",
"numpy.random.normal",
"statsmodels.api.GLS",
"statsmodels.api.add_constant",
"pandas.concat"
] | [((3249, 3307), 'pandas.concat', 'pd.concat', (['[model.params for model in self.models]'], {'axis': '(1)'}), '([model.params for model in self.models], axis=1)\n', (3258, 3307), True, 'import pandas as pd\n'), ((3828, 3874), 'numpy.asarray', 'np.asarray', (['self.wide[t][self._covariate_cols]'], {}), '(self.wide[t][self._covariate_cols])\n', (3838, 3874), True, 'import numpy as np\n'), ((4375, 4438), 'numpy.random.normal', 'np.random.normal', (['expectation', 'sigma'], {'size': '(n_sims, X.shape[0])'}), '(expectation, sigma, size=(n_sims, X.shape[0]))\n', (4391, 4438), True, 'import numpy as np\n'), ((2464, 2510), 'pandas.concat', 'pd.concat', (['self.decades[yr]'], {'axis': '(0)', 'sort': '(True)'}), '(self.decades[yr], axis=0, sort=True)\n', (2473, 2510), True, 'import pandas as pd\n'), ((3911, 3949), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {'has_constant': '"""add"""'}), "(X, has_constant='add')\n", (3926, 3949), True, 'import statsmodels.api as sm\n'), ((4006, 4058), 'numpy.average', 'np.average', (['expectation'], {'weights': 'self.wide[t].weight'}), '(expectation, weights=self.wide[t].weight)\n', (4016, 4058), True, 'import numpy as np\n'), ((2605, 2660), 'statsmodels.api.add_constant', 'sm.add_constant', (['self.decades[yr][self._covariate_cols]'], {}), '(self.decades[yr][self._covariate_cols])\n', (2620, 2660), True, 'import statsmodels.api as sm\n'), ((2733, 2744), 'numpy.isnan', 'np.isnan', (['Y'], {}), '(Y)\n', (2741, 2744), True, 'import numpy as np\n'), ((2786, 2797), 'numpy.isnan', 'np.isnan', (['Y'], {}), '(Y)\n', (2794, 2797), True, 'import numpy as np\n'), ((2903, 2915), 'statsmodels.api.GLS', 'sm.GLS', (['Y', 'X'], {}), '(Y, X)\n', (2909, 2915), True, 'import statsmodels.api as sm\n'), ((3026, 3053), 'statsmodels.api.GLS', 'sm.GLS', (['Y', 'X'], {'sigma': 'weights'}), '(Y, X, sigma=weights)\n', (3032, 3053), True, 'import statsmodels.api as sm\n')] |
import numpy as np
import librosa
import torch
import os
from librosa import amplitude_to_db
from math import floor
from models import modifyresnet18, UNet, Synthesizer
from util.validation import spec2wave
from image2instru import Instru_from_image
import soundfile as sf
import cv2
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # restrict torch to GPU 1
ori_SAMPLE_RATE = 44100  # sample rate of the incoming audio (Hz)
SAMPLE_RATE = 11000  # working sample rate used for processing (Hz)
wave_length = 66302  # samples per processed chunk (~6 s at 11 kHz)
WINDOW_SIZE = 1022  # STFT n_fft, yielding 512 frequency bins
HOP_LENGTH = 256  # STFT hop length in samples
# Map the 512 linear STFT bins onto 256 log-spaced frequencies:
frequencies = np.linspace(SAMPLE_RATE/2/512,SAMPLE_RATE/2,512)
log_freq = np.log10(frequencies)
sample_freq = np.linspace(log_freq[0],log_freq[-1],256)
# For each log-spaced target frequency, the index of the closest STFT bin.
sample_index = np.array([np.abs(log_freq-x).argmin() for x in sample_freq])
model_dir = 'model_params_ratio/'  # directory holding the pretrained .pkl weights
def Separate_and_Locate(video, audio, file_name, OutputAudio):
    """
    Separate a two-player audio mixture into two source tracks, one per
    half of the video frame, and write them to disk.

    The audio is resampled to the working rate and processed chunk by
    chunk: each chunk is STFT'd, re-sampled onto 256 log-spaced bins,
    masked by the synthesizer network conditioned on the left and right
    image halves, and reconstructed with spec2wave. The two waveforms
    are written as ``<file_name>_seg1.wav`` / ``<file_name>_seg2.wav``
    inside ``OutputAudio``.

    Args:
        video: frame array indexed as (frame, H, W, 3);
            assumed ~2.4 frames per second -- TODO confirm.
        audio: mixture waveform of shape (1, samples) at 44100 Hz.
        file_name: basename for the two output wav files.
        OutputAudio: directory the wav files are written into.
    Returns:
        list[str]: the two output wav file paths.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    video_net = modifyresnet18().to(device) # load the networks
    audio_net = UNet().to(device)
    syn_net = Synthesizer().to(device)
    video_net.load_state_dict(torch.load(os.path.join(model_dir, 'video_net_params.pkl')))
    audio_net.load_state_dict(torch.load(os.path.join(model_dir, 'audio_net_params.pkl')))
    syn_net.load_state_dict(torch.load(os.path.join(model_dir, 'syn_net_params.pkl')))
    print('params loaded')
    print('video.shape',video.shape) # video runs at 2.4 frames per second
    #print('audio.shape',audio.shape)
    audio = np.reshape(audio,(audio.shape[1]))
    # Downsample the mixture to the working rate.
    audio = librosa.resample(audio,ori_SAMPLE_RATE,SAMPLE_RATE)
    #print('audio.shape',audio.shape)
    wave_num = floor(audio.shape[0]/wave_length)
    print('wave_num',wave_num)
    wave_seq1 = np.zeros(audio.shape[0], dtype='float32')
    wave_seq2 = np.zeros(audio.shape[0], dtype='float32')
    for i in range(wave_num):
        #print('i=',i)
        data = audio[i*wave_length:(i+1)*wave_length]
        stft_data = librosa.stft(data,n_fft = WINDOW_SIZE,hop_length = HOP_LENGTH,center = False)
        # Keep only the 256 log-spaced frequency bins.
        stft_data = stft_data[sample_index,:] #(256,256)
        stft_data_abs = np.absolute(stft_data)
        # try feeding the magnitude spectrogram without dB conversion
        spec_input = torch.from_numpy(np.reshape(stft_data_abs,(1,1,256,256))).float().to(device)
        #spec_input = torch.from_numpy(np.reshape(amplitude_to_db(stft_data_abs),(1,1,256,256))).float().to(device)
        #print('spec_input.shape',spec_input.shape)
        # Grab 3 consecutive frames aligned with this audio chunk.
        image3 = video[i*floor(2.4*wave_length/SAMPLE_RATE):i*floor(2.4*wave_length/SAMPLE_RATE)+3,:,:,:] #(3,224,224,3)
        #print('image3.shape',image3.shape)
        image3_1 = np.zeros((3,224,224,3), dtype='float32')
        image3_2 = np.zeros((3,224,224,3), dtype='float32')
        # Split each frame into its left/right half and resize to 224x224.
        for j in range(3):
            #print('j=',j)
            image3_1[j,:,:,:] = cv2.resize(image3[j,:,0:112,:],(224,224))
            image3_2[j,:,:,:] = cv2.resize(image3[j,:,112:224,:],(224,224))
        #cv2.namedWindow("Image") # display the image
        #cv2.imshow("Image", image3_1[1])
        #cv2.waitKey(0)
        #cv2.imwrite('D:/test.jpg',image3_1)
        #print('image3_1.shape',image3_1)
        #print('image3_1.shape',image3_1.shape)
        image3_1 = np.transpose(image3_1,(0,3,2,1)) #(3,224,224,3)->(3,3,224,224)
        image3_1 = np.transpose(image3_1,(0,1,3,2))
        image3_2 = np.transpose(image3_2,(0,3,2,1))
        image3_2 = np.transpose(image3_2,(0,1,3,2))
        #image3_1 = image3[:,:,:,0:112]
        #image3_2 = image3[:,:,:,112:224]
        #print(image3.shape)
        image_input1 = torch.from_numpy(image3_1).float().to(device) #(3,3,224,224)
        image_input2 = torch.from_numpy(image3_2).float().to(device)
        #print('image_input.shape',image_input1.shape)
        out_audio_net = audio_net(spec_input)
        out1_video_net = video_net(image_input1) # feed the video network (1,16,1,1)
        #print('out_video_net',out1_video_net.shape)
        out2_video_net = video_net(image_input2)
        input1_syn_net = out1_video_net * out_audio_net # input to the syn network (1,16,256,256)
        #print('input_syn_net_0',input1_syn_net.shape)
        input2_syn_net = out2_video_net * out_audio_net
        input1_syn_net = torch.transpose(input1_syn_net,1,2) # transpose to match dimensions
        input1_syn_net = torch.transpose(input1_syn_net,2,3)
        #print('input_syn_net_1',input1_syn_net.shape)
        input2_syn_net = torch.transpose(input2_syn_net,1,2)
        input2_syn_net = torch.transpose(input2_syn_net,2,3)
        out1_syn_net = syn_net(input1_syn_net)
        #print('out_syn_net_0',out1_syn_net.shape)
        out2_syn_net = syn_net(input2_syn_net)
        out1_syn_net = torch.transpose(out1_syn_net,2,3) # transpose back to match dimensions
        out1_syn_net = torch.transpose(out1_syn_net,1,2)
        #print('out_syn_net_1',out1_syn_net.shape)
        #print(out1_syn_net)
        out2_syn_net = torch.transpose(out2_syn_net,2,3)
        out2_syn_net = torch.transpose(out2_syn_net,1,2) #(1,1,256,256)
        # Predicted spectrogram masks for the left/right sources.
        mask1 = out1_syn_net[0,0,:,:].cpu().detach().numpy()
        mask2 = out2_syn_net[0,0,:,:].cpu().detach().numpy()
        #for j in range(mask1.shape[0]):
        #    for k in range(mask1.shape[1]):
        #        if mask1[j,k] >= mask1.mean():
        #            mask1[j,k] = 1
        #        else:
        #            mask1[j,k] = 0
        #        if mask2[j,k] >= mask2.mean():
        #            mask2[j,k] = 1
        #        else:
        #            mask2[j,k] = 0
        #mask1 = np.round(mask1)
        #mask2 = np.round(mask2)
        #print(mask1)
        #print(mask2)
        # Apply the (soft) masks to the complex spectrogram and invert.
        spec_pre1 = np.multiply(mask1, stft_data)
        spec_pre2 = np.multiply(mask2, stft_data)
        wave_seq1[i*wave_length:(i+1)*wave_length] = spec2wave(spec_pre1)
        wave_seq2[i*wave_length:(i+1)*wave_length] = spec2wave(spec_pre2)
    output_file_name1 = str(file_name) + '_seg1.wav' # wav filenames
    output_file_name2 = str(file_name) + '_seg2.wav'
    audio_name1 = os.path.join(OutputAudio,output_file_name1)
    audio_name2 = os.path.join(OutputAudio,output_file_name2)
    #print(wave_seq1.shape)
    wave_seq1 = librosa.resample(wave_seq1,SAMPLE_RATE,ori_SAMPLE_RATE) # upsample back to the original rate
    wave_seq2 = librosa.resample(wave_seq2,SAMPLE_RATE,ori_SAMPLE_RATE)
    #print('wave_seq.shape',wave_seq1.shape)
    #print(wave_seq1.shape)
    sf.write(audio_name1, wave_seq1, ori_SAMPLE_RATE) # save the audio
    sf.write(audio_name2, wave_seq2, ori_SAMPLE_RATE)
    audio_name = [audio_name1, audio_name2]
    print(audio_name)
    return audio_name | [
"numpy.absolute",
"numpy.abs",
"librosa.resample",
"os.path.join",
"models.Synthesizer",
"numpy.multiply",
"numpy.transpose",
"numpy.reshape",
"numpy.linspace",
"soundfile.write",
"util.validation.spec2wave",
"numpy.log10",
"librosa.stft",
"cv2.resize",
"torch.cuda.is_available",
"torc... | [((460, 516), 'numpy.linspace', 'np.linspace', (['(SAMPLE_RATE / 2 / 512)', '(SAMPLE_RATE / 2)', '(512)'], {}), '(SAMPLE_RATE / 2 / 512, SAMPLE_RATE / 2, 512)\n', (471, 516), True, 'import numpy as np\n'), ((521, 542), 'numpy.log10', 'np.log10', (['frequencies'], {}), '(frequencies)\n', (529, 542), True, 'import numpy as np\n'), ((558, 601), 'numpy.linspace', 'np.linspace', (['log_freq[0]', 'log_freq[-1]', '(256)'], {}), '(log_freq[0], log_freq[-1], 256)\n', (569, 601), True, 'import numpy as np\n'), ((1346, 1379), 'numpy.reshape', 'np.reshape', (['audio', 'audio.shape[1]'], {}), '(audio, audio.shape[1])\n', (1356, 1379), True, 'import numpy as np\n'), ((1391, 1444), 'librosa.resample', 'librosa.resample', (['audio', 'ori_SAMPLE_RATE', 'SAMPLE_RATE'], {}), '(audio, ori_SAMPLE_RATE, SAMPLE_RATE)\n', (1407, 1444), False, 'import librosa\n'), ((1492, 1527), 'math.floor', 'floor', (['(audio.shape[0] / wave_length)'], {}), '(audio.shape[0] / wave_length)\n', (1497, 1527), False, 'from math import floor\n'), ((1569, 1610), 'numpy.zeros', 'np.zeros', (['audio.shape[0]'], {'dtype': '"""float32"""'}), "(audio.shape[0], dtype='float32')\n", (1577, 1610), True, 'import numpy as np\n'), ((1625, 1666), 'numpy.zeros', 'np.zeros', (['audio.shape[0]'], {'dtype': '"""float32"""'}), "(audio.shape[0], dtype='float32')\n", (1633, 1666), True, 'import numpy as np\n'), ((5278, 5322), 'os.path.join', 'os.path.join', (['OutputAudio', 'output_file_name1'], {}), '(OutputAudio, output_file_name1)\n', (5290, 5322), False, 'import os\n'), ((5338, 5382), 'os.path.join', 'os.path.join', (['OutputAudio', 'output_file_name2'], {}), '(OutputAudio, output_file_name2)\n', (5350, 5382), False, 'import os\n'), ((5422, 5479), 'librosa.resample', 'librosa.resample', (['wave_seq1', 'SAMPLE_RATE', 'ori_SAMPLE_RATE'], {}), '(wave_seq1, SAMPLE_RATE, ori_SAMPLE_RATE)\n', (5438, 5479), False, 'import librosa\n'), ((5497, 5554), 'librosa.resample', 'librosa.resample', (['wave_seq2', 'SAMPLE_RATE', 
'ori_SAMPLE_RATE'], {}), '(wave_seq2, SAMPLE_RATE, ori_SAMPLE_RATE)\n', (5513, 5554), False, 'import librosa\n'), ((5624, 5673), 'soundfile.write', 'sf.write', (['audio_name1', 'wave_seq1', 'ori_SAMPLE_RATE'], {}), '(audio_name1, wave_seq1, ori_SAMPLE_RATE)\n', (5632, 5673), True, 'import soundfile as sf\n'), ((5681, 5730), 'soundfile.write', 'sf.write', (['audio_name2', 'wave_seq2', 'ori_SAMPLE_RATE'], {}), '(audio_name2, wave_seq2, ori_SAMPLE_RATE)\n', (5689, 5730), True, 'import soundfile as sf\n'), ((1777, 1851), 'librosa.stft', 'librosa.stft', (['data'], {'n_fft': 'WINDOW_SIZE', 'hop_length': 'HOP_LENGTH', 'center': '(False)'}), '(data, n_fft=WINDOW_SIZE, hop_length=HOP_LENGTH, center=False)\n', (1789, 1851), False, 'import librosa\n'), ((1926, 1948), 'numpy.absolute', 'np.absolute', (['stft_data'], {}), '(stft_data)\n', (1937, 1948), True, 'import numpy as np\n'), ((2382, 2425), 'numpy.zeros', 'np.zeros', (['(3, 224, 224, 3)'], {'dtype': '"""float32"""'}), "((3, 224, 224, 3), dtype='float32')\n", (2390, 2425), True, 'import numpy as np\n'), ((2437, 2480), 'numpy.zeros', 'np.zeros', (['(3, 224, 224, 3)'], {'dtype': '"""float32"""'}), "((3, 224, 224, 3), dtype='float32')\n", (2445, 2480), True, 'import numpy as np\n'), ((2878, 2914), 'numpy.transpose', 'np.transpose', (['image3_1', '(0, 3, 2, 1)'], {}), '(image3_1, (0, 3, 2, 1))\n', (2890, 2914), True, 'import numpy as np\n'), ((2955, 2991), 'numpy.transpose', 'np.transpose', (['image3_1', '(0, 1, 3, 2)'], {}), '(image3_1, (0, 1, 3, 2))\n', (2967, 2991), True, 'import numpy as np\n'), ((3002, 3038), 'numpy.transpose', 'np.transpose', (['image3_2', '(0, 3, 2, 1)'], {}), '(image3_2, (0, 3, 2, 1))\n', (3014, 3038), True, 'import numpy as np\n'), ((3049, 3085), 'numpy.transpose', 'np.transpose', (['image3_2', '(0, 1, 3, 2)'], {}), '(image3_2, (0, 1, 3, 2))\n', (3061, 3085), True, 'import numpy as np\n'), ((3768, 3805), 'torch.transpose', 'torch.transpose', (['input1_syn_net', '(1)', '(2)'], {}), '(input1_syn_net, 
1, 2)\n', (3783, 3805), False, 'import torch\n'), ((3833, 3870), 'torch.transpose', 'torch.transpose', (['input1_syn_net', '(2)', '(3)'], {}), '(input1_syn_net, 2, 3)\n', (3848, 3870), False, 'import torch\n'), ((3939, 3976), 'torch.transpose', 'torch.transpose', (['input2_syn_net', '(1)', '(2)'], {}), '(input2_syn_net, 1, 2)\n', (3954, 3976), False, 'import torch\n'), ((3995, 4032), 'torch.transpose', 'torch.transpose', (['input2_syn_net', '(2)', '(3)'], {}), '(input2_syn_net, 2, 3)\n', (4010, 4032), False, 'import torch\n'), ((4179, 4214), 'torch.transpose', 'torch.transpose', (['out1_syn_net', '(2)', '(3)'], {}), '(out1_syn_net, 2, 3)\n', (4194, 4214), False, 'import torch\n'), ((4240, 4275), 'torch.transpose', 'torch.transpose', (['out1_syn_net', '(1)', '(2)'], {}), '(out1_syn_net, 1, 2)\n', (4255, 4275), False, 'import torch\n'), ((4362, 4397), 'torch.transpose', 'torch.transpose', (['out2_syn_net', '(2)', '(3)'], {}), '(out2_syn_net, 2, 3)\n', (4377, 4397), False, 'import torch\n'), ((4414, 4449), 'torch.transpose', 'torch.transpose', (['out2_syn_net', '(1)', '(2)'], {}), '(out2_syn_net, 1, 2)\n', (4429, 4449), False, 'import torch\n'), ((4939, 4968), 'numpy.multiply', 'np.multiply', (['mask1', 'stft_data'], {}), '(mask1, stft_data)\n', (4950, 4968), True, 'import numpy as np\n'), ((4984, 5013), 'numpy.multiply', 'np.multiply', (['mask2', 'stft_data'], {}), '(mask2, stft_data)\n', (4995, 5013), True, 'import numpy as np\n'), ((5062, 5082), 'util.validation.spec2wave', 'spec2wave', (['spec_pre1'], {}), '(spec_pre1)\n', (5071, 5082), False, 'from util.validation import spec2wave\n'), ((5131, 5151), 'util.validation.spec2wave', 'spec2wave', (['spec_pre2'], {}), '(spec_pre2)\n', (5140, 5151), False, 'from util.validation import spec2wave\n'), ((812, 837), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (835, 837), False, 'import torch\n'), ((864, 880), 'models.modifyresnet18', 'modifyresnet18', ([], {}), '()\n', (878, 880), False, 'from 
models import modifyresnet18, UNet, Synthesizer\n'), ((912, 918), 'models.UNet', 'UNet', ([], {}), '()\n', (916, 918), False, 'from models import modifyresnet18, UNet, Synthesizer\n'), ((942, 955), 'models.Synthesizer', 'Synthesizer', ([], {}), '()\n', (953, 955), False, 'from models import modifyresnet18, UNet, Synthesizer\n'), ((1006, 1053), 'os.path.join', 'os.path.join', (['model_dir', '"""video_net_params.pkl"""'], {}), "(model_dir, 'video_net_params.pkl')\n", (1018, 1053), False, 'import os\n'), ((1095, 1142), 'os.path.join', 'os.path.join', (['model_dir', '"""audio_net_params.pkl"""'], {}), "(model_dir, 'audio_net_params.pkl')\n", (1107, 1142), False, 'import os\n'), ((1182, 1227), 'os.path.join', 'os.path.join', (['model_dir', '"""syn_net_params.pkl"""'], {}), "(model_dir, 'syn_net_params.pkl')\n", (1194, 1227), False, 'import os\n'), ((2543, 2589), 'cv2.resize', 'cv2.resize', (['image3[j, :, 0:112, :]', '(224, 224)'], {}), '(image3[j, :, 0:112, :], (224, 224))\n', (2553, 2589), False, 'import cv2\n'), ((2609, 2657), 'cv2.resize', 'cv2.resize', (['image3[j, :, 112:224, :]', '(224, 224)'], {}), '(image3[j, :, 112:224, :], (224, 224))\n', (2619, 2657), False, 'import cv2\n'), ((626, 646), 'numpy.abs', 'np.abs', (['(log_freq - x)'], {}), '(log_freq - x)\n', (632, 646), True, 'import numpy as np\n'), ((2233, 2271), 'math.floor', 'floor', (['(2.4 * wave_length / SAMPLE_RATE)'], {}), '(2.4 * wave_length / SAMPLE_RATE)\n', (2238, 2271), False, 'from math import floor\n'), ((3196, 3222), 'torch.from_numpy', 'torch.from_numpy', (['image3_1'], {}), '(image3_1)\n', (3212, 3222), False, 'import torch\n'), ((3275, 3301), 'torch.from_numpy', 'torch.from_numpy', (['image3_2'], {}), '(image3_2)\n', (3291, 3301), False, 'import torch\n'), ((1995, 2038), 'numpy.reshape', 'np.reshape', (['stft_data_abs', '(1, 1, 256, 256)'], {}), '(stft_data_abs, (1, 1, 256, 256))\n', (2005, 2038), True, 'import numpy as np\n'), ((2270, 2308), 'math.floor', 'floor', (['(2.4 * wave_length / 
SAMPLE_RATE)'], {}), '(2.4 * wave_length / SAMPLE_RATE)\n', (2275, 2308), False, 'from math import floor\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import gym
import numpy as np
import pytest
import compiler_gym # Register environments.
from compiler_gym.envs import CompilerEnv, llvm
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from compiler_gym.service.connection import CompilerGymServiceConnection
from tests.test_main import main
# Register the shared LLVM-environment fixture module with pytest.
pytest_plugins = ["tests.envs.llvm.fixtures"]
@pytest.fixture(scope="function", params=["local", "service"])
def env(request) -> CompilerEnv:
    """Yield an LLVM environment, parameterized over its construction style.

    ``local`` builds the environment through ``gym.make(...)``; ``service``
    attaches an ``LlvmEnv`` to an unmanaged service connection. Both variants
    are torn down after the test, including the service when one was opened.
    """
    if request.param == "local":
        environment = gym.make("llvm-v0")
        service = None
    else:
        service = CompilerGymServiceConnection(llvm.LLVM_SERVICE_BINARY)
        environment = LlvmEnv(service=service.connection.url, benchmark="foo")
    environment.require_dataset("cBench-v0")
    try:
        yield environment
    finally:
        environment.close()
        if service is not None:
            service.close()
def test_service_env_dies_reset(env: CompilerEnv):
    """The environment must survive its backing service dying mid-episode."""
    env.observation_space = "Autophase"
    env.reward_space = "IrInstructionCount"
    env.reset("cBench-v0/crc32")

    # Kill the backing service out from under the environment.
    env.service.close()

    # Stepping a dead service must not raise: the episode ends and default
    # (zero) observation / reward values come back.
    obs, rew, done, _ = env.step(0)
    assert done
    np.testing.assert_array_equal(obs, np.zeros(56))
    assert rew == 0

    # After a reset the environment reconnects and stepping works again.
    env.reset(benchmark="cBench-v0/crc32")
    obs, rew, done, _ = env.step(0)
    assert not done
    assert obs is not None
    assert rew is not None
# Allow running this test module directly via the shared test runner.
if __name__ == "__main__":
    main()
| [
"gym.make",
"tests.test_main.main",
"pytest.fixture",
"compiler_gym.service.connection.CompilerGymServiceConnection",
"numpy.zeros",
"compiler_gym.envs.llvm.llvm_env.LlvmEnv"
] | [((588, 649), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""', 'params': "['local', 'service']"}), "(scope='function', params=['local', 'service'])\n", (602, 649), False, 'import pytest\n'), ((1985, 1991), 'tests.test_main.main', 'main', ([], {}), '()\n', (1989, 1991), False, 'from tests.test_main import main\n'), ((821, 840), 'gym.make', 'gym.make', (['"""llvm-v0"""'], {}), "('llvm-v0')\n", (829, 840), False, 'import gym\n'), ((986, 1040), 'compiler_gym.service.connection.CompilerGymServiceConnection', 'CompilerGymServiceConnection', (['llvm.LLVM_SERVICE_BINARY'], {}), '(llvm.LLVM_SERVICE_BINARY)\n', (1014, 1040), False, 'from compiler_gym.service.connection import CompilerGymServiceConnection\n'), ((1055, 1111), 'compiler_gym.envs.llvm.llvm_env.LlvmEnv', 'LlvmEnv', ([], {'service': 'service.connection.url', 'benchmark': '"""foo"""'}), "(service=service.connection.url, benchmark='foo')\n", (1062, 1111), False, 'from compiler_gym.envs.llvm.llvm_env import LlvmEnv\n'), ((1686, 1698), 'numpy.zeros', 'np.zeros', (['(56)'], {}), '(56)\n', (1694, 1698), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import os
import sys
# Make the project's ``src`` directory importable no matter which working
# directory this script is launched from.
root = os.path.join(os.getcwd().split('src')[0], 'src')
if root not in sys.path:
    sys.path.append(root)
from oracle.models import rf_model
from metrics.abcd import abcd
from pdb import set_trace
import numpy as np
import pandas
from tabulate import tabulate
from datasets.handler2 import get_all_datasets
def weight_training(test_instance, training_instance):
    """Z-score both datasets using the *test* set's statistics.

    The training features are normalised with the test set's per-column mean
    and standard deviation (bellwether-style transfer weighting). Columns that
    become all-NaN (e.g. zero variance in the test set) are dropped, and both
    frames are restricted to the feature columns they still share, with the
    class label kept as the last column.

    :param test_instance: pandas.DataFrame whose last column is the label.
    :param training_instance: pandas.DataFrame whose last column is the label.
    :return: ``(new_train, new_test)`` with an identical, deterministic
        column order.
    """
    head = training_instance.columns
    # Normalise the TRAINING features with the TEST set's statistics.
    new_train = (training_instance[head[:-1]] - test_instance[head[:-1]].mean()) \
        / test_instance[head[:-1]].std()
    new_train[head[-1]] = training_instance[head[-1]]
    new_train.dropna(axis=1, inplace=True)

    tgt = new_train.columns
    # Normalise the test features with their own statistics.
    new_test = (test_instance[tgt[:-1]] - test_instance[tgt[:-1]].mean()) / (
        test_instance[tgt[:-1]].std())
    new_test[tgt[-1]] = test_instance[tgt[-1]]
    new_test.dropna(axis=1, inplace=True)

    # BUGFIX: the original used set().intersection(), whose iteration order is
    # nondeterministic under string-hash randomisation, so the returned column
    # order could differ between runs. Preserve the training-frame order.
    shared = [col for col in tgt[:-1] if col in new_test.columns[:-1]]
    columns = shared + [tgt[-1]]
    return new_train[columns], new_test[columns]
def predict_defects(train, test):
    """
    Perform Code-Smell Prediction.

    Trains a model via ``oracle.models.rf_model`` (presumably a random
    forest -- see that module) on ``train`` and predicts labels for ``test``.
    The last column of each frame is treated as the class label.

    :param train: training data; pandas.DataFrame with the label last.
    :type train: pandas.DataFrame
    :param test: test data; pandas.DataFrame with the label last.
    :type test: pandas.DataFrame
    :return: ``(actual, predicted, distr)`` -- true labels as a list,
        predicted labels, and the prediction distribution from ``rf_model``.
    """
    actual = test[test.columns[-1]].values.tolist()
    predicted, distr = rf_model(train, test)
    return actual, predicted, distr
def bellw(source, target, verbose=True, n_rep=12):
    """
    Bellwether-style cross-project prediction.

    For every dataset in ``target``, every *other* dataset in ``source`` is
    used as training data: the pair is normalised with ``weight_training``,
    trained/evaluated ``n_rep`` times via ``predict_defects``, and the mean
    scores are collected into one table per target.

    :param source: mapping of dataset name -> csv path (training candidates)
    :param target: mapping of dataset name -> csv path (test sets)
    :param verbose: when True, print a formatted results table per target
    :param n_rep: number of repeats per source/target pair
    :return: dict mapping target name -> pandas.DataFrame of mean scores,
        sorted by G score in descending order
    """
    result = dict()
    # BUGFIX: dict.iteritems()/xrange are Python-2-only and raise
    # AttributeError/NameError on Python 3; items()/range behave identically
    # here on both interpreters. Unused locals ``charts`` and ``val`` removed.
    for tgt_name, tgt_path in target.items():
        stats = []
        if verbose: print("{} \r".format(tgt_name[0].upper() + tgt_name[1:]))
        for src_name, src_path in source.items():
            if not src_name == tgt_name:
                src = pandas.read_csv(src_path)
                tgt = pandas.read_csv(tgt_path)
                pd, pf, pr, f1, g, auc = [], [], [], [], [], []
                for _ in range(n_rep):
                    _train, __test = weight_training(test_instance=tgt, training_instance=src)
                    actual, predicted, distribution = predict_defects(train=_train, test=__test)
                    p_d, p_f, p_r, rc, f_1, e_d, _g, auroc = abcd(actual, predicted, distribution)
                    pd.append(p_d)
                    pf.append(p_f)
                    pr.append(p_r)
                    f1.append(f_1)
                    g.append(_g)
                    auc.append(int(auroc))
                stats.append([src_name, int(np.mean(pd)), int(np.mean(pf)),
                              int(np.mean(pr)), int(np.mean(f1)),
                              int(np.mean(g)), int(np.mean(auc))])
        # Sort by G score so the best bellwether candidate is listed first.
        stats = pandas.DataFrame(sorted(stats, key=lambda lst: lst[-2], reverse=True),
                         columns=["Name", "Pd", "Pf", "Prec", "F1", "G", "AUC"])
        if verbose: print(tabulate(stats,
                          headers=["Name", "Pd", "Pf", "Prec", "F1", "G", "AUC"],
                          showindex="never",
                          tablefmt="fancy_grid"))
        result.update({tgt_name: stats})
    return result
def tnb_jur():
    """Run the bellwether experiment on the "LongMethod" code-smell dataset."""
    # BUGFIX: the original bound the datasets to a local named ``all``,
    # shadowing the builtin, and called Python-2-only ``iteritems()``.
    datasets = get_all_datasets()
    for name, paths in datasets.items():
        if name == "LongMethod":
            bellw(paths, paths, verbose=True, n_rep=10)
# Script entry point: run the bellwether experiment directly.
if __name__ == "__main__":
    tnb_jur()
| [
"sys.path.append",
"datasets.handler2.get_all_datasets",
"pandas.read_csv",
"os.getcwd",
"metrics.abcd.abcd",
"numpy.mean",
"tabulate.tabulate",
"oracle.models.rf_model"
] | [((156, 177), 'sys.path.append', 'sys.path.append', (['root'], {}), '(root)\n', (171, 177), False, 'import sys\n'), ((1320, 1341), 'oracle.models.rf_model', 'rf_model', (['train', 'test'], {}), '(train, test)\n', (1328, 1341), False, 'from oracle.models import rf_model\n'), ((3294, 3312), 'datasets.handler2.get_all_datasets', 'get_all_datasets', ([], {}), '()\n', (3310, 3312), False, 'from datasets.handler2 import get_all_datasets\n'), ((91, 102), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (100, 102), False, 'import os\n'), ((1893, 1918), 'pandas.read_csv', 'pandas.read_csv', (['src_path'], {}), '(src_path)\n', (1908, 1918), False, 'import pandas\n'), ((1941, 1966), 'pandas.read_csv', 'pandas.read_csv', (['tgt_path'], {}), '(tgt_path)\n', (1956, 1966), False, 'import pandas\n'), ((3022, 3139), 'tabulate.tabulate', 'tabulate', (['stats'], {'headers': "['Name', 'Pd', 'Pf', 'Prec', 'F1', 'G', 'AUC']", 'showindex': '"""never"""', 'tablefmt': '"""fancy_grid"""'}), "(stats, headers=['Name', 'Pd', 'Pf', 'Prec', 'F1', 'G', 'AUC'],\n showindex='never', tablefmt='fancy_grid')\n", (3030, 3139), False, 'from tabulate import tabulate\n'), ((2324, 2361), 'metrics.abcd.abcd', 'abcd', (['actual', 'predicted', 'distribution'], {}), '(actual, predicted, distribution)\n', (2328, 2361), False, 'from metrics.abcd import abcd\n'), ((2624, 2635), 'numpy.mean', 'np.mean', (['pd'], {}), '(pd)\n', (2631, 2635), True, 'import numpy as np\n'), ((2642, 2653), 'numpy.mean', 'np.mean', (['pf'], {}), '(pf)\n', (2649, 2653), True, 'import numpy as np\n'), ((2690, 2701), 'numpy.mean', 'np.mean', (['pr'], {}), '(pr)\n', (2697, 2701), True, 'import numpy as np\n'), ((2708, 2719), 'numpy.mean', 'np.mean', (['f1'], {}), '(f1)\n', (2715, 2719), True, 'import numpy as np\n'), ((2756, 2766), 'numpy.mean', 'np.mean', (['g'], {}), '(g)\n', (2763, 2766), True, 'import numpy as np\n'), ((2773, 2785), 'numpy.mean', 'np.mean', (['auc'], {}), '(auc)\n', (2780, 2785), True, 'import numpy as np\n')] |
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import onnx # noqa
import numpy as np
import torch
import brevitas.onnx as bo
from brevitas.nn import QuantHardTanh
from brevitas.core.restrict_val import RestrictValueType
from brevitas.core.scaling import ScalingImplType
import pytest
from finn.core.modelwrapper import ModelWrapper
import finn.core.onnx_exec as oxe
from finn.transformation.infer_shapes import InferShapes
from brevitas.core.quant import QuantType
# Path of the temporary ONNX file exported by each test run.
export_onnx_path = "test_brevitas_non_scaled_QuantHardTanh_export.onnx"
@pytest.mark.parametrize("abits", [1, 2, 4, 8])
@pytest.mark.parametrize("narrow_range", [False, True])
@pytest.mark.parametrize("max_val", [1.0, 1 - 2 ** (-7)])
def test_brevitas_act_export_qhardtanh_nonscaled(abits, narrow_range, max_val):
    """Export a non-scaled QuantHardTanh to FINN-ONNX and check that ONNX
    execution matches the Brevitas forward pass on random inputs."""

    def get_quant_type(bit_width):
        # Map a bit width to the matching Brevitas quantization type.
        if bit_width is None:
            return QuantType.FP
        elif bit_width == 1:
            return QuantType.BINARY
        else:
            return QuantType.INT

    act_quant_type = get_quant_type(abits)
    min_val = -1.0
    ishape = (1, 10)
    b_act = QuantHardTanh(
        bit_width=abits,
        quant_type=act_quant_type,
        max_val=max_val,
        min_val=min_val,
        restrict_scaling_type=RestrictValueType.LOG_FP,
        scaling_impl_type=ScalingImplType.CONST,
        narrow_range=narrow_range,
    )
    bo.export_finn_onnx(b_act, ishape, export_onnx_path)
    # BUGFIX: the original called os.remove() only after the assertion, so a
    # failing test leaked the exported file and a stale model could pollute
    # later parametrizations/runs. Clean up unconditionally.
    try:
        model = ModelWrapper(export_onnx_path)
        model = model.transform(InferShapes())
        inp_tensor = np.random.uniform(low=min_val, high=max_val, size=ishape).astype(
            np.float32
        )
        idict = {model.graph.input[0].name: inp_tensor}
        odict = oxe.execute_onnx(model, idict, True)
        produced = odict[model.graph.output[0].name]
        inp_tensor = torch.from_numpy(inp_tensor).float()
        expected = b_act.forward(inp_tensor).detach().numpy()
        assert np.isclose(produced, expected, atol=1e-3).all()
    finally:
        os.remove(export_onnx_path)
| [
"numpy.random.uniform",
"os.remove",
"finn.core.modelwrapper.ModelWrapper",
"finn.transformation.infer_shapes.InferShapes",
"finn.core.onnx_exec.execute_onnx",
"numpy.isclose",
"pytest.mark.parametrize",
"brevitas.nn.QuantHardTanh",
"brevitas.onnx.export_finn_onnx",
"torch.from_numpy"
] | [((2023, 2069), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""abits"""', '[1, 2, 4, 8]'], {}), "('abits', [1, 2, 4, 8])\n", (2046, 2069), False, 'import pytest\n'), ((2071, 2125), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""narrow_range"""', '[False, True]'], {}), "('narrow_range', [False, True])\n", (2094, 2125), False, 'import pytest\n'), ((2127, 2181), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""max_val"""', '[1.0, 1 - 2 ** -7]'], {}), "('max_val', [1.0, 1 - 2 ** -7])\n", (2150, 2181), False, 'import pytest\n'), ((2569, 2784), 'brevitas.nn.QuantHardTanh', 'QuantHardTanh', ([], {'bit_width': 'abits', 'quant_type': 'act_quant_type', 'max_val': 'max_val', 'min_val': 'min_val', 'restrict_scaling_type': 'RestrictValueType.LOG_FP', 'scaling_impl_type': 'ScalingImplType.CONST', 'narrow_range': 'narrow_range'}), '(bit_width=abits, quant_type=act_quant_type, max_val=max_val,\n min_val=min_val, restrict_scaling_type=RestrictValueType.LOG_FP,\n scaling_impl_type=ScalingImplType.CONST, narrow_range=narrow_range)\n', (2582, 2784), False, 'from brevitas.nn import QuantHardTanh\n'), ((2844, 2896), 'brevitas.onnx.export_finn_onnx', 'bo.export_finn_onnx', (['b_act', 'ishape', 'export_onnx_path'], {}), '(b_act, ishape, export_onnx_path)\n', (2863, 2896), True, 'import brevitas.onnx as bo\n'), ((2909, 2939), 'finn.core.modelwrapper.ModelWrapper', 'ModelWrapper', (['export_onnx_path'], {}), '(export_onnx_path)\n', (2921, 2939), False, 'from finn.core.modelwrapper import ModelWrapper\n'), ((3155, 3191), 'finn.core.onnx_exec.execute_onnx', 'oxe.execute_onnx', (['model', 'idict', '(True)'], {}), '(model, idict, True)\n', (3171, 3191), True, 'import finn.core.onnx_exec as oxe\n'), ((3416, 3443), 'os.remove', 'os.remove', (['export_onnx_path'], {}), '(export_onnx_path)\n', (3425, 3443), False, 'import os\n'), ((2968, 2981), 'finn.transformation.infer_shapes.InferShapes', 'InferShapes', ([], {}), '()\n', (2979, 2981), False, 'from 
finn.transformation.infer_shapes import InferShapes\n'), ((3000, 3057), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'min_val', 'high': 'max_val', 'size': 'ishape'}), '(low=min_val, high=max_val, size=ishape)\n', (3017, 3057), True, 'import numpy as np\n'), ((3258, 3286), 'torch.from_numpy', 'torch.from_numpy', (['inp_tensor'], {}), '(inp_tensor)\n', (3274, 3286), False, 'import torch\n'), ((3364, 3406), 'numpy.isclose', 'np.isclose', (['produced', 'expected'], {'atol': '(0.001)'}), '(produced, expected, atol=0.001)\n', (3374, 3406), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.