| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
monikascholz/pWARP | fluowarp.py | 1 | 9385 |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 4 19:53:51 2014
Phase correlation drift correction.
Used the paper "Cross-correlation image tracking for drift correction and
adsorbate analysis" by B. A. Mantooth, Z. J. Donhauser, K. F. Kelly, and P. S. Weiss
for inspiration.
@author: Monika Kauer
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy import ndimage
import os, sys
import scipy.misc as misc
import pump_writer
##===================================================#
# image generator in/output
## ==================================================#
def read_sequentially(params, intrvl = 1):
"""Function that reads sequentially with every call."""
filenames = os.listdir(params['directory'])
filenames = [f for f in filenames if ".%s"% params["type"] in f]
filenames = pump_writer.natural_sort(filenames)
filenames2 = filenames[params["start"]:params["end"]:intrvl]
#deal with non-divisor chunk lengths
if (params["end"]-params["start"])%intrvl != 0:
try:
filenames2.append(filenames[params["end"]])
params['nof'] = (params["end"]-params["start"])
except IndexError:
filenames2.append(filenames[-1])
params['nof'] = (len(filenames)-params["start"])
elif intrvl > 1 and (params["end"]-params["start"])%intrvl == 0:
try:
filenames2.append(filenames[params["end"]])
params['nof'] = (params["end"]-params["start"])
except IndexError:
filenames2.append(filenames[-1])
params['nof'] = (len(filenames)-params["start"])
for filename in filenames2:
yield read_img(params['directory']+filename, params)
def read_img(fname, params):
"""reads an image file as array."""
try:
image = misc.imread(fname, flatten=True)
data = np.asarray(image, dtype = np.int64)
if params['rotate']:
data = np.transpose(data)
if params['cropx']:
data = data[:,params['cropx'][0]:params['cropx'][1]]
return data
except IOError:
print fname
pass
##===================================================#
# image registration
## ==================================================#
def reg(im1, im2, params):
"""Find image-image correlation and translation vector using FFTs."""
# a Hanning window could be applied here to reduce edge effects from the finite image size (not applied in this implementation)
shape= np.array(im1.shape)
fft_im1 = np.fft.fft2(im1)
fft_im2 = np.conj(np.fft.fft2(im2))
corr = np.fft.ifft2(fft_im1*fft_im2).real
corr = ndimage.gaussian_filter(corr, .5) - ndimage.gaussian_filter(corr, 30)
t0, t1 = np.unravel_index(np.argmax(corr), shape)
if t0 > shape[0] // 2:
t0 -= shape[0]
if t1 > shape[1] // 2:
t1 -= shape[1]
return corr, [t0, 0]
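# A minimal, hypothetical sketch of the cross-correlation idea used by reg()
# above: for two images that differ only by a translation, the FFT-based
# cross-correlation peaks at the translation vector (note that reg() returns
# [t0, 0], i.e. only drift along the first axis is kept). The helper name
# _example_shift_recovery and the synthetic data are assumptions for
# illustration, not part of the original module.
def _example_shift_recovery():
    rng = np.random.RandomState(0)
    im1 = rng.rand(64, 64)
    im2 = np.roll(im1, shift=(5, -3), axis=(0, 1))  # apply a known circular shift
    corr = np.fft.ifft2(np.fft.fft2(im1) * np.conj(np.fft.fft2(im2))).real
    t0, t1 = np.unravel_index(np.argmax(corr), corr.shape)
    if t0 > corr.shape[0] // 2:  # map peaks past the midpoint to negative shifts
        t0 -= corr.shape[0]
    if t1 > corr.shape[1] // 2:
        t1 -= corr.shape[1]
    return t0, t1  # recovers (-5, 3): the applied roll with opposite sign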
def find_roi(params):
"""calculates image drift using registration via correlation."""
im = read_sequentially(params, intrvl = params["chunk"])
roi = [[0,0]]
im_new = im.next()
height, width = im_new.shape
try:
while True: #go through all image chunks from start to end
im_old = im_new
im_new = im.next()
im1 = np.where(im_old>np.median(im_old), 1,0)
im2 = np.where(im_new>np.median(im_new), 1,0)
_,drift = reg(im1, im2, params)
roi.append(drift)
except StopIteration:
pass
finally:
del im
return np.array(roi)
def interpol_drift(drift, params):
"""Returns linearly interpolated ROI.
This uses drift calculation where drift comes from adjacent reference frame."""
x = np.cumsum(drift[:,1])
y = np.cumsum(drift[:,0])
r = np.zeros((params['nof'],2))
dr = params['nof']%params['chunk']
for cnt in xrange(1,len(r)-dr):
index = float(cnt)/(params["chunk"])
i = int(index)+1
vy, vx = y[i]-y[i-1], x[i]-x[i-1]
#r[cnt] = (index%1*vy)+y[i-1],(index%1*vx)+x[i-1]
r[cnt] = y[i-1]+(index%1)*vy,x[i-1]+(index%1)*vx
if cnt == len(r)-dr-1:
#this deals with leftover interval if images%chunk!=0
for rest in xrange(1,dr+1):
index = float(rest)/dr+1
vy, vx = y[i]-y[i-1], x[i]-x[i-1]
r[cnt+rest] = y[i-1]+(index%1*vy),x[i-1]+(index%1*vx)
return r
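# A small, hypothetical sketch of the same idea as interpol_drift(): expand
# chunk-to-chunk drift estimates into a per-frame drift by linear
# interpolation. np.interp is used here only to illustrate the concept; the
# helper name and toy numbers are assumptions, not part of the original module.
def _example_interpolate_drift():
    chunk = 10                              # one drift estimate every 10 frames
    drift_y = np.cumsum([0, 2, -1, 3])      # cumulative drift at frames 0, 10, 20, 30
    frames = np.arange(31)
    return np.interp(frames, np.arange(len(drift_y)) * chunk, drift_y)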
##===================================================#
# feature detection for neuron physiological imaging
## ==================================================#
def fluorescence(params, roi):
"""finds a neuron from images using thresholding
in a region of interest."""
images = read_sequentially(params)
values,locations = [],[]
try:
cnt = 0
imgs = ndimage.shift(images.next(), roi[cnt], mode="wrap")
cms_old = [params['y0'],params['x0']]
#print "cms_old is ",cms_old
val_old = []
y0, x0 = cms_old
height, width = imgs.shape
cnt += 1
while True:
y1, x1, fluor, bg = similarity3(imgs, cms_old,[y0,x0], params)
#implement a short memory of neuron position
val_old.append([y1, x1])
y0 = np.average([v[0] for v in val_old[-10:]])
x0 = np.average([v[1] for v in val_old[-10:]])
values.append([fluor, bg])
locations.append([y1-roi[cnt-1][0],x1+params["cropx"][0]-roi[cnt-1][1]])
imgs = ndimage.shift(images.next(), roi[cnt], mode="wrap")
cnt += 1
except StopIteration:
pass
finally:
del images
return np.array(values), np.array(locations)
def similarity3(im1, cms,old_coor, params):
"""Calculates fluorescence of neuron by thresholding."""
bgsize = params["bgsize"]
part1 = im1[max(0,cms[0]-bgsize):cms[0]+bgsize, max(0,cms[1]-bgsize):cms[1]+bgsize]
offsety, offsetx = max(0,cms[0]-bgsize), max(0,cms[1]-bgsize)
height, width = part1.shape
y0,x0 = old_coor #previous coords
thresh = np.sort(part1, axis=None)[-int(params["thresh_pump"]*height*width)]
#print "threshold is", thresh
mask = np.where(part1 > thresh, 1, 0)
mask = ndimage.binary_opening(mask,structure = np.ones((2,2)))
mask = ndimage.binary_closing(mask)
label_im, nb_labels = ndimage.label(mask)
centroids = ndimage.measurements.center_of_mass(part1, label_im, xrange(1,nb_labels+1))
dist = []
for index, coord in enumerate(centroids):
y,x= coord
dist.append((y-y0+offsety)**2 + (x-x0+offsetx)**2)
if min(dist)>2*params["max_movement"]**2:
print dist, y0,x0, offsety, offsetx,
y,x = y0-offsety,x0-offsetx
radius = params["roisize"]
neuron = part1[max(0,y-radius):y+radius,max(0,x-radius):x+radius,]
value = np.ma.average(np.sort(neuron, axis=None)[-20:])
else:
loc = np.argmin(dist)
y,x = centroids[loc]
remove_pixel = np.where(label_im ==loc+1,0,1)
neuron = np.ma.masked_array(part1, remove_pixel)
value = np.ma.average(neuron)
try:
radius = params["roisize"]
mask1 = np.zeros(part1.shape, dtype=bool)
mask1[max(0,y-radius):y+radius,max(0,x-radius):x+radius,] = True
bg_mask = np.ma.mask_or(mask,mask1)
bg = np.ma.masked_array(part1, bg_mask)
bg_level = np.ma.average(bg)
except IndexError:
y,x=y0,x0
value=0
bg_level=0
return y+offsety, x+offsetx, value, bg_level
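# A hypothetical sketch of the core of similarity3(): threshold a small
# region, label connected components, and take component centroids with
# ndimage. The helper name and the synthetic image are assumptions for
# illustration, not part of the original module.
def _example_blob_centroid():
    img = np.zeros((20, 20))
    img[5:9, 12:16] = 10.0                   # one bright square "neuron"
    mask = img > 0.5                         # simple threshold instead of the percentile rule
    labels, n_labels = ndimage.label(mask)
    centroids = ndimage.measurements.center_of_mass(img, labels, range(1, n_labels + 1))
    return centroids                         # approximately [(6.5, 13.5)]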
##===================================================#
# Main
## ==================================================#
def warp_detector(params):
##===================================================#
# Translation correction
## ==================================================#
drift = find_roi(params)
drift = interpol_drift(drift, params)
print "done with drift"
sys.stdout.flush()
##===================================================#
# detect pumping
## ==================================================#
coords = fluorescence(params, drift)
time = np.arange(params["start"], params["start"]+len(coords),1)
out_data = zip(time,coords[:,0], coords[:,1],coords[:,2])
print "Analysis of: ",params["start"], params["end"]
##===================================================#
# write results and movie
## ==================================================#
if len(coords) > 0:
outputstring = "%s_%i_%i"%(params["basename"],params["start"],params["end"]-1)
pump_writer.write_data(params["outdir"], outputstring+"_kymo", out_data, 4)
images = read_sequentially(params)
fig = plt.figure(params["start"]+1) #make unique figures needed for parallelization
ax1 = fig.add_subplot(211)
pump_writer.make_kymograph(images, params, diff=False, roi=drift)
ax1.plot(cms[:,0]+drift[:,0])
ax1.set_xlim([0,len(cms)])
ax2 = fig.add_subplot(212)
ax2.plot(coords[:,0])
ax2.plot(coords[:,1])
ax2.set_xlim([0,len(coords)])
ax2.set_ylim(ax2.get_ylim()[::-1])
fig.savefig(params["outdir"]+"/"+outputstring+"_kym.png")
| gpl-2.0 |
nrhine1/scikit-learn | sklearn/neighbors/base.py | 22 | 31143 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
from ..externals.joblib import Parallel, delayed
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates is more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
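# A hypothetical sketch of the brute-force pattern used in kneighbors() above:
# argpartition finds the k smallest distances per row without ordering them,
# and a second argsort orders only those k candidates. np.argpartition is the
# NumPy equivalent of the argpartition fixture imported above; the helper name
# and the toy distance matrix are assumptions, not part of the original module.
def _example_brute_force_knn(k=2):
    dist = np.array([[0.0, 3.0, 1.0, 2.0],
                     [3.0, 0.0, 2.0, 5.0]])
    sample_range = np.arange(dist.shape[0])[:, None]
    neigh_ind = np.argpartition(dist, k - 1, axis=1)[:, :k]
    neigh_ind = neigh_ind[sample_range,
                          np.argsort(dist[sample_range, neigh_ind])]
    return neigh_ind                         # -> [[0, 2], [1, 2]]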
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
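# A hypothetical sketch of how radius_neighbors() above packs ragged results:
# each query point can have a different number of neighbors, so the per-query
# index arrays are stored in a 1-D object array via a two-step fill (see the
# numpy issue referenced above). The helper name and toy lists are
# assumptions, not part of the original module.
def _example_ragged_neighbor_results():
    neigh_ind_list = [np.array([0, 2]), np.array([1]), np.array([0, 1, 3])]
    neigh_ind = np.empty(len(neigh_ind_list), dtype='object')
    neigh_ind[:] = neigh_ind_list            # each element keeps its own length
    return neigh_ind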
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
LiaoPan/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
Cophy08/ggplot | ggplot/tests/test_element_text.py | 12 | 1362 | from nose.tools import assert_equal, assert_true
from ggplot.tests import image_comparison, cleanup
from ggplot import *
from numpy import linspace
from pandas import DataFrame
df = DataFrame({"blahblahblah": linspace(999, 1111, 9),
"yadayadayada": linspace(999, 1111, 9)})
simple_gg = ggplot(aes(x="blahblahblah", y="yadayadayada"), data=df) + geom_line()
@image_comparison(["all_text"], tol=13)
def test_element_text1():
print(simple_gg + theme(text=element_text(family="serif", face="bold",
size=50, color="red", angle=45)))
@image_comparison(["axis_text"], tol=13)
def test_element_text2():
#print(simple_gg)
print(simple_gg +
theme(text=element_text(face="bold", size=50, color="red")) +
theme(axis_text=element_text(color="green", angle=45)))
@image_comparison(["axis_title"], tol=13)
def test_element_text3():
print (simple_gg +
theme(text=element_text(face="bold", color="red")) +
theme(axis_title=element_text(color="purple", size=50)))
@image_comparison(["axis_title_text"], tol=15)
def test_element_text4():
print(simple_gg +
theme(text=element_text(face="bold", color="red")) +
theme(axis_text_y=element_text(color="green", size=50)) +
theme(axis_title=element_text(color="blue", size=50)))
| bsd-2-clause |
hgn/pmu-tools | interval-plot.py | 3 | 3566 | #!/usr/bin/python
# plot interval CSV output from perf/toplev
# perf stat -I1000 -x, -o file ...
# toplev -I1000 -x, -o file ...
# interval-plot.py file (or stdin)
# delimiter must be ,
# this is for data that is not normalized
# TODO: move legend somewhere else where it doesn't overlap?
import csv
import sys
import matplotlib.pyplot as plt
import collections
import argparse
import re
p = argparse.ArgumentParser(
usage='plot interval CSV output from perf stat/toplev',
description='''
perf stat -I1000 -x, -o file ...
toplev -I1000 -x, -o file ...
interval-plot.py file (or stdin)
delimiter must be ,
this is for data that is not normalized.''')
p.add_argument('--xkcd', action='store_true', help='enable xkcd mode')
p.add_argument('--style', help='set mpltools style (e.g. ggplot)')
p.add_argument('file', help='CSV file to plot (or stdin)', nargs='?')
p.add_argument('--output', '-o', help='Output to file. Otherwise show.',
nargs='?')
args = p.parse_args()
if args.style:
try:
from mpltools import style
style.use(args.style)
except ImportError:
print "Need mpltools for setting styles (pip install mpltools)"
import gen_level
try:
import brewer2mpl
all_colors = brewer2mpl.get_map('Paired', 'Qualitative', 12).hex_colors
except ImportError:
print "Install brewer2mpl for better colors (pip install brewer2mpl)"
all_colors = ('green','orange','red','blue',
'black','olive','purple','#6960EC', '#F0FFFF',
'#728C00', '#827B60', '#F87217', '#E55451', # 16
'#F88017', '#C11B17', '#17BFC2', '#C48793') # 20
cur_colors = collections.defaultdict(lambda: all_colors)
assigned = dict()
if args.file:
inf = open(args.file, "r")
else:
inf = sys.stdin
rc = csv.reader(inf)
timestamps = dict()
value = dict()
def isnum(x):
return re.match(r'[0-9.]+', x)
val = ""
for r in rc:
# timestamp,event,value
if len(r) < 3:
continue
print r
if len(r) >= 5 and not isnum(r[1]):
ts, event, val, thresh, desc = r[:5]
elif len(r) >= 4:
ts, val, unit, event = r[:4]
else:
ts, val, event = r
if event not in assigned:
level = gen_level.get_level(event)
assigned[event] = cur_colors[level][0]
cur_colors[level] = cur_colors[level][1:]
if len(cur_colors[level]) == 0:
cur_colors[level] = all_colors
value[event] = []
timestamps[event] = []
timestamps[event].append(float(ts))
try:
value[event].append(float(val.replace("%","")))
except ValueError:
value[event].append(0.0)
levels = dict()
for j in assigned.keys():
levels[gen_level.get_level(j)] = True
if args.xkcd:
try:
plt.xkcd()
except NameError:
print "Please update matplotlib. Cannot enable xkcd mode."
n = 1
for l in levels.keys():
ax = plt.subplot(len(levels), 1, n)
if val.find('%') >= 0:
ax.set_ylim(0, 100)
t = []
for j in assigned.keys():
print j, gen_level.get_level(j), l
if gen_level.get_level(j) == l:
t.append(j)
if 'style' not in globals():
ax.plot(timestamps[j], value[j], assigned[j])
else:
ax.plot(timestamps[j], value[j])
leg = ax.legend(t, loc='upper left')
leg.get_frame().set_alpha(0.5)
n += 1
plt.xlabel('Time')
if val.find('%') >= 0:
plt.ylabel('Bottleneck %')
else:
plt.ylabel("Counter value")
if args.output:
plt.savefig(args.output)
else:
plt.show()
| gpl-2.0 |
phronesis-mnemosyne/census-schema-alignment | algn-merge.py | 1 | 4062 | import re
import json
import argparse
import numpy as np
import pandas as pd
import sys
sys.path.append('wit')
from mmd import *
from munkres import Munkres
# --
# Alignment functions
def align(dist):
'''
Munkres alignment between a single pair of schemas
'''
if dist.shape[0] > dist.shape[1]:
return None
else:
mmap = Munkres().compute(np.array(dist))
mmap = [ (dist.index[x[0]], dist.columns[x[1]]) for x in mmap ]
mmap = [ (x[0], x[1], np.round(dist[x[1]].loc[x[0]], 3)) for x in mmap ]
algn = pd.DataFrame(mmap)
algn.columns = ('hash_1', 'hash_2', 'cost')
algn['variable_1'] = algn['hash_1'].apply(lambda x: x.split('-')[-1])
algn['variable_2'] = algn['hash_2'].apply(lambda x: x.split('-')[-1])
return algn
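# A small, hypothetical sketch of the Munkres (Hungarian) assignment that
# align() builds on: given a cost matrix of pairwise distances between the
# fields of two schemas, it returns the one-to-one matching with minimum total
# cost. The helper name and the 3x3 toy matrix are assumptions, not part of
# the original script.
def _example_munkres_assignment():
    cost = [[4, 1, 3],
            [2, 0, 5],
            [3, 2, 2]]
    pairs = Munkres().compute(cost)
    return pairs                     # -> [(0, 1), (1, 0), (2, 2)], total cost 5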
def all_alignments(dist, uschemas, verbose = False):
'''
Iterate `align` over all pairs of schemas
'''
alignments = []
for s1 in uschemas:
if verbose:
print s1
for s2 in uschemas:
if s1 != s2:
dist_ = dist.copy()
dist_ = dist_[filter(lambda x: s1 in x, dist_.columns)]
dist_ = dist_.loc[filter(lambda x: s2 in x, dist_.index)]
algn = align(dist_)
if np.any(algn):
algn['schema_1'] = s1
algn['schema_2'] = s2
alignments.append(algn)
alignments = pd.concat(alignments).drop_duplicates().reset_index()
return alignments
def resolve_set(sets):
'''
Parse the strings that are created by `merge_pair` into sets of equivalent hashes.
'''
sets = [f.group(0) for f in re.finditer(r'\[[^\[\]]*\]', sets)]
sets = filter(lambda x: x != '[None]', sets)
sets = map(lambda x: re.sub('\[|\]', '', x), sets)
sets = map(lambda x: re.sub('(^\w*)__', r'\1-', x), sets)
return sets
def merge_pair(dpreds, uschema):
'''
Merge the pair of schemas with the lowest total cost.
Repeating this eventually merges all schemas into a single schema.
'''
dist = dfdist(dpreds, dist_dist = True)
a = all_alignments(dist, uschema)
# Find pairwise merge with lowest total cost
pair_costs = a.groupby(['schema_1', 'schema_2']).cost.apply(np.mean).reset_index()
pair_costs.sort_values('cost', inplace = True)
best_merge = list(pair_costs.iloc[0][['schema_1', 'schema_2']])
# Construct map for corresponding hashes
links = a[a.schema_1.isin(best_merge) & a.schema_2.isin(best_merge)][['hash_1', 'hash_2']]
links = [(r[1], r[2]) for r in links.to_records()]
dl = dict(links + [(l[1], l[0]) for l in links])
# Map hashes together
sel = dpreds.lab.apply(lambda x: x.split('-')[0]).isin(best_merge)
uhash = dpreds.lab[sel].unique()
for uh in uhash:
tmp = tuple(sorted((uh, dl.get(uh, None))))
dpreds.lab[dpreds.lab == uh] = '{' + str(N) + '}' + '-' + re.sub('-', '__', '[%s][%s]' % tmp)
return dpreds
# --
# CLI
parser = argparse.ArgumentParser()
parser.add_argument('--infile', dest = 'infile', required = True)
parser.add_argument('--outfile', dest = 'outfile', required = True)
args = parser.parse_args()
config = {
'infile' : args.infile,
'outfile' : args.outfile
}
# --
print '-- loading datasets --'
inp = np.load(config['infile']).item()
dpreds = pd.DataFrame(inp['preds'])
dpreds['lab'] = map(lambda x: re.sub('(^[^-]*)-', '\\1', x), np.array(inp['levs'])[inp['labs']])
# --
print '-- aligning schemas --'
N = 0
while True:
uschema = dpreds.lab.apply(lambda x: x.split('-')[0]).unique()
print 'number of schemas : %d' % (len(uschema) - 1)
if len(uschema) == 1:
break
dpreds = merge_pair(dpreds, uschema)
N += 1
resolved_sets = map(resolve_set, dpreds.lab.unique())
json.dump(resolved_sets, open(config['outfile'], 'w'), indent = 2) | apache-2.0 |
henrykironde/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
MartinDelzant/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
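# A small, hypothetical sketch of what graph_laplacian() above computes in the
# unnormalized case: L = D - A, with D the diagonal degree matrix. The helper
# name and the 3-node path graph are assumptions, not part of the original
# module.
def _example_path_graph_laplacian():
    adjacency = np.array([[0., 1., 0.],
                          [1., 0., 1.],
                          [0., 1., 0.]])       # path graph 0 - 1 - 2
    degree = np.diag(adjacency.sum(axis=1))
    laplacian = degree - adjacency
    # graph_laplacian(adjacency) returns the same matrix:
    # [[ 1., -1.,  0.], [-1.,  2., -1.], [ 0., -1.,  1.]]
    return laplacian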
| bsd-3-clause |
jrderuiter/pyim | src/pyim/annotate/annotators/window.py | 1 | 5561 | from collections import namedtuple
from itertools import chain
from pathlib import Path
import pandas as pd
from pyim.vendor.genopandas import GenomicDataFrame
from .base import Annotator, AnnotatorCommand, CisAnnotator
from ..util import filter_blacklist, select_closest, annotate_insertion
class WindowAnnotator(Annotator):
"""Window annotator class."""
def __init__(self, genes, windows, closest=False, blacklist=None):
super().__init__()
self._windows = windows
self._genes = genes
self._closest = closest
self._blacklist = blacklist
@classmethod
def from_window_size(cls, genes, window_size, **kwargs):
"""Creates instance using given window size."""
window = Window(
upstream=window_size,
downstream=window_size,
strand=None,
name=None,
strict_left=False,
strict_right=False)
return cls(genes=genes, windows=[window], **kwargs)
def annotate(self, insertions):
yield from chain.from_iterable((self._annotate_insertion(ins)
for ins in insertions))
def _annotate_insertion(self, insertion):
# Identify overlapping features.
hits = []
for window in self._windows:
region = window.apply(insertion.chromosome, insertion.position,
insertion.strand)
overlap = self._get_genes(region)
overlap = overlap.assign(window=window.name)
hits.append(overlap)
hits = pd.concat(hits, axis=0, ignore_index=True)
# Filter for closest/blacklist.
if self._closest:
hits = select_closest(insertion, hits)
if self._blacklist is not None:
hits = filter_blacklist(hits, self._blacklist)
# Annotate insertion with identified hits.
yield from annotate_insertion(insertion, hits)
def _get_genes(self, region):
try:
overlap = self._genes.gi.search(
region.chromosome,
region.start,
region.end,
strict_left=region.strict_left,
strict_right=region.strict_right)
if region.strand is not None:
overlap = overlap.loc[overlap['strand'] == region.strand]
except KeyError:
overlap = GenomicDataFrame(
pd.DataFrame().reindex(columns=self._genes.columns))
return overlap
class Window(object):
"""Class respresenting a relative genomic window."""
def __init__(self,
upstream,
downstream,
strand,
name=None,
strict_left=False,
strict_right=False):
self.name = name
self.upstream = upstream
self.downstream = downstream
self.strand = strand
self.strict_left = strict_left
self.strict_right = strict_right
def apply(self, chromosome, position, strand):
"""Applies window to given position."""
# Determine start/end position.
if strand == 1:
start = position - self.upstream
end = position + self.downstream
strict_left = self.strict_left
strict_right = self.strict_right
elif strand == -1:
start = position - self.downstream
end = position + self.upstream
strict_right = self.strict_left
strict_left = self.strict_right
else:
raise ValueError('Unknown value for strand ({})'.format(strand))
strand = None if self.strand is None else self.strand * strand
return Region(
chromosome,
start,
end,
strand,
strict_left=strict_left,
strict_right=strict_right)
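# A small, hypothetical usage sketch of Window.apply(): a relative window
# (upstream/downstream of an insertion site) is turned into an absolute
# genomic region, and the window is mirrored for insertions on the reverse
# strand. The coordinates below are assumptions, not part of the original
# module.
def _example_apply_window():
    window = Window(upstream=2000, downstream=10000, strand=None)
    fwd = window.apply('1', position=100000, strand=1)    # spans 98000-110000
    rev = window.apply('1', position=100000, strand=-1)   # spans 90000-102000
    return fwd, rev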
Region = namedtuple('Region', [
'chromosome', 'start', 'end', 'strand', 'strict_left', 'strict_right'
])
class WindowAnnotatorCommand(AnnotatorCommand):
"""WindowAnnotator command."""
name = 'window'
def configure(self, parser):
super().configure(parser)
# Required arguments.
parser.add_argument('--gtf', required=True, type=Path)
# Optional arguments.
parser.add_argument('--window_size', default=20000, type=int)
parser.add_argument('--closest', default=False, action='store_true')
parser.add_argument('--blacklist', nargs='+', default=None)
def run(self, args):
# Read insertions and genes.
insertions = self._read_insertions(args.insertions)
genes = self._read_genes_from_gtf(args.gtf)
# Setup annotator.
if args.cis_sites is not None:
cis_sites = list(self._read_cis_sites(args.cis_sites))
sub_annotator = WindowAnnotator.from_window_size(
genes=genes,
window_size=args.window_size,
closest=args.closest,
blacklist=args.blacklist)
annotator = CisAnnotator(
annotator=sub_annotator, genes=genes, cis_sites=cis_sites)
else:
annotator = WindowAnnotator.from_window_size(
genes=genes,
window_size=args.window_size,
closest=args.closest,
blacklist=args.blacklist)
# Annotate insertions and write output.
annotated = annotator.annotate(insertions)
self._write_output(args.output, insertions=annotated)
| mit |
moutai/scikit-learn | examples/model_selection/randomized_search.py | 44 | 3253 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
lepy/phuzzy | phuzzy/data/plots.py | 1 | 2069 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def p_estimates(df, ax=None, show=False):
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(10,5))
else:
fig = plt.gcf()
for col in [c for c in df.columns if c.startswith("p_")]:
ax.plot(df.x, df[col])
ax.set_xlabel("x")
ax.set_ylabel("p")
ax.legend(loc="best")
fig.tight_layout()
if show is True:
plt.show()
return fig, ax
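# Illustrative sketch (not part of the original module): ``p_estimates``
# expects a DataFrame with an ``x`` column plus one or more columns whose
# names start with ``p_``. The column names below are arbitrary examples.
def _example_p_estimates():
    x = np.linspace(-3., 3., 101)
    df = pd.DataFrame({"x": x,
                       "p_lower": np.clip(x / 6. + .4, 0., 1.),
                       "p_upper": np.clip(x / 6. + .6, 0., 1.)})
    return p_estimates(df, show=False)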
def bootstrapping(data, df_boot, show=False):
fig, axs = plt.subplots(1, 2, figsize=(10,5))
ax_mean = axs[0]
ax_std = axs[1]
ax_mean.hist(df_boot.x_mean)
ax_std.hist(df_boot.x_std)
ax_mean.axvline(df_boot.x_mean.mean(), color="m", lw=2, alpha=.6, dashes=[10,10])
ax_mean.axvline(data.df.x.mean(), color="r", lw=1, alpha=.6)
ax_std.axvline(df_boot.x_std.mean(), color="m", lw=2, alpha=.6, dashes=[10,10])
ax_std.axvline(data.df.x.std(), color="r", lw=1, alpha=.6)
ax_std.set_xlabel("x_std")
ax_mean.set_xlabel("x_mean")
ax_std.set_ylabel("frequency")
ax_mean.set_ylabel("frequency")
ax_std.set_title("standard deviation")
ax_mean.set_title("mean")
fig.tight_layout()
if show is True:
plt.show()
return fig, axs
def plot_hist(x, ax=None, bins=None, normed=False, color=None, **kwargs):
x = x[~np.isnan(x)]
if bins is None:
bins = 'auto'
bins, edges = np.histogram(x, bins=bins)
left, right = edges[:-1], edges[1:]
X = np.array([left, right]).T.flatten()
Y = np.array([bins, bins]).T.flatten()
if normed is True:
Y = Y/float(Y.max())
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(10,5))
else:
fig = plt.gcf()
if kwargs.get("filled") is True:
ax.fill_between(X, 0, Y, label=kwargs.get("label"), color=kwargs.get("color", "b"), alpha=kwargs.get("alpha", .3))
else:
ax.plot(X,Y, label=kwargs.get("label"), color=kwargs.get("color", "r"), alpha=kwargs.get("alpha", .8))
return fig, ax
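# Illustrative sketch (not part of the original module): a normalized, filled
# histogram outline for a random sample; the data themselves are arbitrary.
def _example_plot_hist():
    x = np.random.normal(size=1000)
    fig, ax = plot_hist(x, normed=True, filled=True, label="sample")
    ax.legend(loc="best")
    return fig, ax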
| mit |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Spatial_Resolution/space_resolution_per_meter_mov_fixed_percent.py | 1 | 4190 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
if __name__ == '__main__':
# Categories
# Considering 1 cm
# 4 Categories
# Resolution
cat_acc_resolution = np.array([60, 65.71, 64.29, 75, 79.29])
cat_taxel_resolution = np.array([1, 2, 7, 25, 100])
i = 0
cat_acc_resolution_percent = np.zeros(5)
while i < len(cat_acc_resolution)-1:
cat_acc_resolution_percent[i+1] = 100*(cat_acc_resolution[i+1] - cat_acc_resolution[0])/cat_acc_resolution[0]
i = i+1
# 2 Categories
# Resolution
cat_acc_resolution_mf = np.array([81.43, 89.29, 82.86, 85.71, 89.29])
cat_taxel_resolution_mf = np.array([1, 2, 7, 25, 100])
i = 0
cat_acc_resolution_mf_percent = np.zeros(5)
while i < len(cat_acc_resolution_mf)-1:
cat_acc_resolution_mf_percent[i+1] = 100*(cat_acc_resolution_mf[i+1] - cat_acc_resolution_mf[0])/cat_acc_resolution_mf[0]
i = i+1
# Objects
# Resolution
ob_acc_resolution = np.array([61.43, 58.57, 64.29, 65.71, 72.86])
ob_taxel_resolution = np.array([1, 2, 7, 25, 100])
i = 0
ob_acc_resolution_percent = np.zeros(5)
while i < len(ob_acc_resolution)-1:
ob_acc_resolution_percent[i+1] = 100*(ob_acc_resolution[i+1] - ob_acc_resolution[0])/ob_acc_resolution[0]
i = i+1
# Considering 0.9 cm
# 4 Categories
# Resolution
cat_acc_resolution_2 = np.array([60, 65.71, 64.29, 75, 79.29])
cat_taxel_resolution_2 = np.array([1, 2, 7, 28, 112])
i = 0
cat_acc_resolution_2_percent = np.zeros(5)
while i < len(cat_acc_resolution_2)-1:
cat_acc_resolution_2_percent[i+1] = 100*(cat_acc_resolution_2[i+1] - cat_acc_resolution_2[0])/cat_acc_resolution_2[0]
i = i+1
# 2 Categories
# Resolution
cat_acc_resolution_2_mf = np.array([81.43, 89.29, 82.86, 85.71, 89.29])
cat_taxel_resolution_2_mf = np.array([1, 2, 7, 28, 112])
i = 0
cat_acc_resolution_2_mf_percent = np.zeros(5)
while i < len(cat_acc_resolution_2_mf)-1:
cat_acc_resolution_2_mf_percent[i+1] = 100*(cat_acc_resolution_2_mf[i+1] - cat_acc_resolution_2_mf[0])/cat_acc_resolution_2_mf[0]
i = i+1
# Objects
# Resolution
ob_acc_resolution_2 = np.array([61.43, 58.57, 64.29, 65.71, 72.86])
ob_taxel_resolution_2 = np.array([1, 2, 7, 28, 112])
i = 0
ob_acc_resolution_2_percent = np.zeros(5)
while i < len(ob_acc_resolution_2)-1:
ob_acc_resolution_2_percent[i+1] = 100*(ob_acc_resolution_2[i+1] - ob_acc_resolution_2[0])/ob_acc_resolution_2[0]
i = i+1
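# Illustrative aside (not in the original script): each while loop above
# computes the percentage improvement of every accuracy value relative to the
# 1-taxel baseline (the first entry). A vectorized numpy equivalent would be:
def percent_improvement(acc):
    acc = np.asarray(acc, dtype=float)
    out = np.zeros_like(acc)
    out[1:] = 100. * (acc[1:] - acc[0]) / acc[0]
    return out
# e.g. percent_improvement(cat_acc_resolution) reproduces
# cat_acc_resolution_percent.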
# Plots
# Resolution
figure(1)
plot(cat_taxel_resolution,cat_acc_resolution_percent,'-s')
grid('True')
hold('True')
plot(cat_taxel_resolution_mf,cat_acc_resolution_mf_percent,'-s')
plot(ob_taxel_resolution,ob_acc_resolution_percent,'-s')
legend(('4-Categories', '2-Categories', 'Objects'),loc='4')
ylabel('Percentage Improvement in Cross-Validation Accuracy from 1-Taxel')
xlabel('Taxels/m (Each Taxel = 1cm X 1cm)')
#axis([0,100,0,90])
# Resolution
figure(2)
plot(cat_taxel_resolution_2,cat_acc_resolution_2_percent,'-s')
grid('True')
hold('True')
plot(cat_taxel_resolution_2_mf,cat_acc_resolution_2_mf_percent,'-s')
plot(ob_taxel_resolution_2,ob_acc_resolution_2_percent,'-s')
legend(('4-Categories', '2-Categories', 'Objects'),loc='4')
ylabel('Percentage Improvement in Cross-Validation Accuracy from 1-Taxel')
xlabel('Taxels/m (Each Taxel = 9mm X 9mm)')
#axis([0,112,0,90])
show()
| mit |
HEHenson/CanDataPY | misc.py | 1 | 1202 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 19:57:47 2016
@author: lancehermes
"""
import glob
import shutil
from pandas import Series, DataFrame, HDFStore
import pandas.rpy.common as com
import feather
from rpy2.robjects import pandas2ri
def copycsv():
rootdir = "/home/lancehermes/Dropbox/business/Project/CANSIMPY"
srcdir = rootdir + "/rawdownload/"
destdir = rootdir + "/test2/rawdump/"
srcfiles = srcdir + "*.csv"
print("copy from %s \n to %s" %(srcfiles,destdir))
for data in glob.glob(srcfiles):
shutil.copy2(data,destdir)
def exportmatrix(thematrix,targettype):
"""export dataframe to target package"""
# first retrieve matrix
MYstore = HDFStore('Central_data.h5','r+')
thedatfr = MYstore.select(thematrix)
# only R supported at this time
if(targettype == 'R'):
ex_to_R(thedatfr,thematrix)
def ex_to_R(thedatfr,thematrix):
"""copy to dataframe to R"""
# note that feather had to be hand installed
# the extra copy is a temporary patch to fix a known bug
thedatfr_stride = thedatfr.copy()
feather.write_dataframe(thedatfr_stride,thematrix)
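def _example_export():
    """Illustrative only (not part of the original module): export one stored
    table to an R-readable feather file. 'example_table' is a placeholder for
    a key that is assumed to exist in Central_data.h5."""
    exportmatrix('example_table', 'R')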
if __name__ == '__main__':
copycsv() | unlicense |
drammock/expyfun | expyfun/visual/_visual.py | 2 | 46036 | """
Visual stimulus design
======================
Tools for drawing shapes and text on the screen.
"""
# Authors: Dan McCloy <drmccloy@uw.edu>
# Eric Larson <larsoner@uw.edu>
# Ross Maddox <rkmaddox@uw.edu>
#
# License: BSD (3-clause)
from ctypes import (cast, pointer, POINTER, create_string_buffer, c_char,
c_int, c_float)
from functools import partial
import re
import warnings
import numpy as np
try:
from PyOpenGL import gl
except ImportError:
from pyglet import gl
from .._utils import check_units, string_types, logger, _new_pyglet
def _convert_color(color, byte=True):
"""Convert 3- or 4-element color into OpenGL usable color"""
from matplotlib.colors import colorConverter
color = (0., 0., 0., 0.) if color is None else color
color = 255 * np.array(colorConverter.to_rgba(color))
color = color.astype(np.uint8)
if not byte:
color = (color / 255.).astype(np.float32)
return tuple(color)
def _replicate_color(color, pts):
"""Convert single color to color array for OpenGL trianglulations"""
return np.tile(color, len(pts) // 2)
##############################################################################
# Text
class Text(object):
"""A text object.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
text : str
The text to display.
pos : array
2-element array consisting of X- and Y-position coordinates.
color : matplotlib Color
Color of the text.
font_name : str
Font to use.
font_size : float
Font size (points) to use.
height : float | None
Height of the text region. None will automatically allocate the
necessary size.
width : float | None | str
Width (in pixels) of the text region. `'auto'` will allocate 80% of
the screen width, useful for instructions. None will automatically
allocate sufficient space, but note that this disables text wrapping.
anchor_x : str
Horizontal text anchor (e.g., ``'center'``).
anchor_y : str
Vertical text anchor (e.g., ``'center'``).
units : str
Units to use. These will apply to all spatial aspects of the drawing
shape, e.g. size, position. See ``check_units`` for options.
wrap : bool
Whether or not the text will wrap to fit in screen, appropriate for
multiline text. Inappropriate for text requiring precise positioning.
attr : bool
Should the text be interpreted with pyglet's ``decode_attributed``
method? This allows inline formatting for text color, e.g.,
``'This is {color (255, 0, 0, 255)}red text'``. If ``attr=True``, the
values of ``font_name``, ``font_size``, and ``color`` are automatically
prepended to ``text`` (though they will be overridden by any inline
formatting within ``text`` itself).
Returns
-------
text : instance of Text
The text object.
"""
def __init__(self, ec, text, pos=(0, 0), color='white',
font_name='Arial', font_size=24, height=None,
width='auto', anchor_x='center', anchor_y='center',
units='norm', wrap=False, attr=True):
import pyglet
pos = np.array(pos)[:, np.newaxis]
pos = ec._convert_units(pos, units, 'pix')
if width == 'auto':
width = float(ec.window_size_pix[0]) * 0.8
elif isinstance(width, string_types):
raise ValueError('"width", if str, must be "auto"')
self._attr = attr
if wrap:
text = text + '\n ' # weird Pyglet bug
if self._attr:
preamble = ('{{font_name \'{}\'}}{{font_size {}}}{{color {}}}'
'').format(font_name, font_size, _convert_color(color))
doc = pyglet.text.decode_attributed(preamble + text)
self._text = pyglet.text.layout.TextLayout(
doc, width=width, height=height, multiline=wrap,
dpi=int(ec.dpi))
else:
self._text = pyglet.text.Label(
text, width=width, height=height, multiline=wrap,
dpi=int(ec.dpi))
self._text.color = _convert_color(color)
self._text.font_name = font_name
self._text.font_size = font_size
self._text.x = pos[0]
self._text.y = pos[1]
self._text.anchor_x = anchor_x
self._text.anchor_y = anchor_y
def set_color(self, color):
"""Set the text color
Parameters
----------
color : matplotlib Color | None
The color. Use None for no color.
"""
if self._attr:
self._text.document.set_style(0, len(self._text.document.text),
{'color': _convert_color(color)})
else:
self._text.color = _convert_color(color)
def draw(self):
"""Draw the object to the display buffer"""
self._text.draw()
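# Illustrative sketch (not part of the original module): drawing a centered
# instruction string. ``ec`` is assumed to be an already-running
# ExperimentController; it is not created here.
def _example_text(ec):
    msg = Text(ec, 'Press any key to begin', pos=(0, 0.5), color='yellow',
               font_size=32)
    msg.draw()
    ec.flip()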
##############################################################################
# Triangulations
tri_vert = """
#version 120
attribute vec2 a_position;
uniform mat4 u_view;
void main()
{
gl_Position = u_view * vec4(a_position, 0.0, 1.0);
}
"""
tri_frag = """
#version 120
uniform vec4 u_color;
void main()
{
gl_FragColor = u_color;
}
"""
def _check_log(obj, func):
log = create_string_buffer(4096)
ptr = cast(pointer(log), POINTER(c_char))
func(obj, 4096, pointer(c_int()), ptr)
message = log.value
message = message.decode()
if message.startswith('No errors') or \
re.match('.*shader was successfully compiled.*', message) or \
message == 'Vertex shader(s) linked, fragment shader(s) linked.\n':
pass
elif message:
raise RuntimeError(message)
class _Triangular(object):
"""Super class for objects that use triangulations and/or lines"""
def __init__(self, ec, fill_color, line_color, line_width, line_loop):
self._ec = ec
self._line_width = line_width
self._line_loop = line_loop # whether or not lines drawn are looped
# initialize program and shaders
self._program = gl.glCreateProgram()
vertex = gl.glCreateShader(gl.GL_VERTEX_SHADER)
buf = create_string_buffer(tri_vert.encode('ASCII'))
ptr = cast(pointer(pointer(buf)), POINTER(POINTER(c_char)))
gl.glShaderSource(vertex, 1, ptr, None)
gl.glCompileShader(vertex)
_check_log(vertex, gl.glGetShaderInfoLog)
fragment = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
buf = create_string_buffer(tri_frag.encode('ASCII'))
ptr = cast(pointer(pointer(buf)), POINTER(POINTER(c_char)))
gl.glShaderSource(fragment, 1, ptr, None)
gl.glCompileShader(fragment)
_check_log(fragment, gl.glGetShaderInfoLog)
gl.glAttachShader(self._program, vertex)
gl.glAttachShader(self._program, fragment)
gl.glLinkProgram(self._program)
_check_log(self._program, gl.glGetProgramInfoLog)
gl.glDetachShader(self._program, vertex)
gl.glDetachShader(self._program, fragment)
gl.glUseProgram(self._program)
# Prepare buffers and bind attributes
loc = gl.glGetUniformLocation(self._program, b'u_view')
view = ec.window_size_pix
view = np.diag([2. / view[0], 2. / view[1], 1., 1.])
view[-1, :2] = -1
view = view.astype(np.float32).ravel()
gl.glUniformMatrix4fv(loc, 1, False, (c_float * 16)(*view))
self._counts = dict()
self._colors = dict()
self._buffers = dict()
self._points = dict()
self._tris = dict()
for kind in ('line', 'fill'):
self._counts[kind] = 0
self._colors[kind] = (0., 0., 0., 0.)
self._buffers[kind] = dict(array=gl.GLuint())
gl.glGenBuffers(1, pointer(self._buffers[kind]['array']))
self._buffers['fill']['index'] = gl.GLuint()
gl.glGenBuffers(1, pointer(self._buffers['fill']['index']))
gl.glUseProgram(0)
self.set_fill_color(fill_color)
self.set_line_color(line_color)
def _set_points(self, points, kind, tris):
"""Set fill and line points."""
if points is None:
self._counts[kind] = 0
points = np.asarray(points, dtype=np.float32, order='C')
assert points.ndim == 2 and points.shape[1] == 2
array_count = points.size // 2 if kind == 'line' else points.size
if kind == 'fill':
assert tris is not None
tris = np.asarray(tris, dtype=np.uint32, order='C')
assert tris.ndim == 1 and tris.size % 3 == 0
tris.shape = (-1, 3)
assert (tris < len(points)).all()
self._tris[kind] = tris
del tris
self._points[kind] = points
del points
gl.glUseProgram(self._program)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._buffers[kind]['array'])
gl.glBufferData(gl.GL_ARRAY_BUFFER, self._points[kind].size * 4,
self._points[kind].tobytes(),
gl.GL_STATIC_DRAW)
if kind == 'line':
self._counts[kind] = array_count
if kind == 'fill':
self._counts[kind] = self._tris[kind].size
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER,
self._buffers[kind]['index'])
gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER,
self._tris[kind].size * 4,
self._tris[kind].tobytes(),
gl.GL_STATIC_DRAW)
gl.glUseProgram(0)
def _set_fill_points(self, points, tris):
self._set_points(points, 'fill', tris)
def _set_line_points(self, points):
self._set_points(points, 'line', None)
def set_fill_color(self, fill_color):
"""Set the object color
Parameters
----------
fill_color : matplotlib Color | None
The fill color. Use None for no fill.
"""
self._colors['fill'] = _convert_color(fill_color, byte=False)
def set_line_color(self, line_color):
"""Set the object color
Parameters
----------
line_color : matplotlib Color | None
The line color. Use None for no line.
"""
self._colors['line'] = _convert_color(line_color, byte=False)
def set_line_width(self, line_width):
"""Set the line width in pixels
Parameters
----------
line_width : float
The line width. Must be given in pixels. Due to OpenGL
limitations, it must be `0.0 <= line_width <= 10.0`.
"""
line_width = float(line_width)
if not (0.0 <= line_width <= 10.0):
raise ValueError('line_width must be between 0 and 10')
self._line_width = line_width
def draw(self):
"""Draw the object to the display buffer."""
gl.glUseProgram(self._program)
for kind in ('fill', 'line'):
if self._counts[kind] > 0:
if kind == 'line':
if self._line_width <= 0.0:
continue
gl.glLineWidth(self._line_width)
if self._line_loop:
mode = gl.GL_LINE_LOOP
else:
mode = gl.GL_LINE_STRIP
cmd = partial(gl.glDrawArrays, mode, 0, self._counts[kind])
else:
gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER,
self._buffers[kind]['index'])
cmd = partial(gl.glDrawElements, gl.GL_TRIANGLES,
self._counts[kind], gl.GL_UNSIGNED_INT, 0)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER,
self._buffers[kind]['array'])
loc_pos = gl.glGetAttribLocation(self._program, b'a_position')
gl.glEnableVertexAttribArray(loc_pos)
gl.glVertexAttribPointer(loc_pos, 2, gl.GL_FLOAT, gl.GL_FALSE,
0, 0)
loc_col = gl.glGetUniformLocation(self._program, b'u_color')
gl.glUniform4f(loc_col, *self._colors[kind])
cmd()
# The following line is probably only necessary because
# Pyglet makes some assumptions about the GL state that
# it perhaps shouldn't. Without it, Text might not
# render properly (see #252)
gl.glDisableVertexAttribArray(loc_pos)
gl.glUseProgram(0)
class Line(_Triangular):
"""A connected set of line segments
Parameters
----------
ec : instance of ExperimentController
Parent EC.
coords : array-like
2 x N set of X, Y coordinates.
units : str
Units to use. These will apply to all spatial aspects of the drawing
shape, e.g. size, position. See ``check_units`` for options.
line_color : matplotlib Color
Color of the line.
line_width : float
Line width in pixels.
line_loop : bool
If True, the last point will be joined to the first in a loop.
Returns
-------
line : instance of Line
The line object.
"""
def __init__(self, ec, coords, units='norm', line_color='white',
line_width=1.0, line_loop=False):
_Triangular.__init__(self, ec, fill_color=None, line_color=line_color,
line_width=line_width, line_loop=line_loop)
self.set_coords(coords, units)
self.set_line_color(line_color)
def set_coords(self, coords, units='norm'):
"""Set line coordinates
Parameters
----------
coords : array-like
2 x N set of X, Y coordinates.
units : str
Units to use.
"""
check_units(units)
coords = np.array(coords, dtype=float)
if coords.ndim == 1:
coords = coords[:, np.newaxis]
if coords.ndim != 2 or coords.shape[0] != 2:
raise ValueError('coords must be a vector of length 2, or an '
'array with 2 dimensions (with first dimension '
'having length 2')
self._set_line_points(self._ec._convert_units(coords, units, 'pix').T)
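# Illustrative sketch (not part of the original module): a diagonal line from
# the lower-left to the upper-right screen corner in normalized units. ``ec``
# is assumed to be an existing ExperimentController.
def _example_line(ec):
    line = Line(ec, [[-1, 1], [-1, 1]], units='norm', line_color='r',
                line_width=2.0)
    line.draw()
    ec.flip()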
class Triangle(_Triangular):
"""A triangle
Parameters
----------
ec : instance of ExperimentController
Parent EC.
coords : array-like
2 x 3 set of X, Y coordinates.
units : str
Units to use. These will apply to all spatial aspects of the drawing
shape, e.g. size, position. See ``check_units`` for options.
fill_color : matplotlib Color
Color of the triangle.
line_color : matplotlib Color | None
Color of the border line. None is transparent.
line_width : float
Line width in pixels.
Returns
-------
triangle : instance of Triangle
The triangle object.
"""
def __init__(self, ec, coords, units='norm', fill_color='white',
line_color=None, line_width=1.0):
_Triangular.__init__(self, ec, fill_color=fill_color,
line_color=line_color, line_width=line_width,
line_loop=True)
self.set_coords(coords, units)
self.set_fill_color(fill_color)
def set_coords(self, coords, units='norm'):
"""Set triangle coordinates
Parameters
----------
coords : array-like
2 x 3 set of X, Y coordinates.
units : str
Units to use.
"""
check_units(units)
coords = np.array(coords, dtype=float)
if coords.shape != (2, 3):
raise ValueError('coords must be an array of shape (2, 3), got %s'
% (coords.shape,))
points = self._ec._convert_units(coords, units, 'pix')
points = points.T
self._set_fill_points(points, [0, 1, 2])
self._set_line_points(points)
class Rectangle(_Triangular):
"""A rectangle.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
pos : array-like
4-element array-like with X, Y center and width, height where x and y
are coordinates of the center.
units : str
Units to use. These will apply to all spatial aspects of the drawing
shape, e.g. size, position. See ``check_units`` for options.
fill_color : matplotlib Color | None
Color to fill with. None is transparent.
line_color : matplotlib Color | None
Color of the border line. None is transparent.
line_width : float
Line width in pixels.
Returns
-------
rectangle : instance of Rectangle
The rectangle object.
"""
def __init__(self, ec, pos, units='norm', fill_color='white',
line_color=None, line_width=1.0):
_Triangular.__init__(self, ec, fill_color=fill_color,
line_color=line_color, line_width=line_width,
line_loop=True)
self.set_pos(pos, units)
def set_pos(self, pos, units='norm'):
"""Set the position of the rectangle
Parameters
----------
pos : array-like
X, Y, width, height of the rectangle.
units : str
Units to use. See ``check_units`` for options.
"""
check_units(units)
# do this in normalized units, then convert
pos = np.array(pos)
if not (pos.ndim == 1 and pos.size == 4):
raise ValueError('pos must be a 4-element array-like vector')
self._pos = pos
w = self._pos[2]
h = self._pos[3]
points = np.array([[-w / 2., -h / 2.],
[-w / 2., h / 2.],
[w / 2., h / 2.],
[w / 2., -h / 2.]]).T
points += np.array(self._pos[:2])[:, np.newaxis]
points = self._ec._convert_units(points, units, 'pix')
points = points.T
self._set_fill_points(points, [0, 1, 2, 0, 2, 3])
self._set_line_points(points) # all 4 points used for line drawing
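# Illustrative sketch (not part of the original module): ``pos`` packs the
# center coordinates together with width and height, so this draws a gray bar
# half the screen wide, centered on the origin. ``ec`` is assumed to be an
# existing ExperimentController.
def _example_rectangle(ec):
    bar = Rectangle(ec, pos=[0, 0, 1.0, 0.2], units='norm', fill_color='gray',
                    line_color='w', line_width=2.0)
    bar.draw()
    ec.flip()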
class Diamond(_Triangular):
"""A diamond.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
pos : array-like
4-element array-like with X, Y center and width, height where x and y
are coordinates of the center.
units : str
Units to use. These will apply to all spatial aspects of the drawing
shape, e.g. size, position. See ``check_units`` for options.
fill_color : matplotlib Color | None
Color to fill with. None is transparent.
line_color : matplotlib Color | None
Color of the border line. None is transparent.
line_width : float
Line width in pixels.
Returns
-------
diamond : instance of Diamond
The diamond object.
"""
def __init__(self, ec, pos, units='norm', fill_color='white',
line_color=None, line_width=1.0):
_Triangular.__init__(self, ec, fill_color=fill_color,
line_color=line_color, line_width=line_width,
line_loop=True)
self.set_pos(pos, units)
def set_pos(self, pos, units='norm'):
"""Set the position of the rectangle
Parameters
----------
pos : array-like
X, Y, width, height of the diamond.
units : str
Units to use. See ``check_units`` for options.
"""
check_units(units)
# do this in normalized units, then convert
pos = np.array(pos)
if not (pos.ndim == 1 and pos.size == 4):
raise ValueError('pos must be a 4-element array-like vector')
self._pos = pos
w = self._pos[2]
h = self._pos[3]
points = np.array([[w / 2., 0.],
[0., h / 2.],
[-w / 2., 0.],
[0., -h / 2.]]).T
points += np.array(self._pos[:2])[:, np.newaxis]
points = self._ec._convert_units(points, units, 'pix')
points = points.T
self._set_fill_points(points, [0, 1, 2, 0, 2, 3])
self._set_line_points(points)
class Circle(_Triangular):
"""A circle or ellipse.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
radius : float | array-like
Radius of the circle. Can be array-like with two elements to
make an ellipse.
pos : array-like
2-element array-like with X, Y center positions.
units : str
Units to use. These will apply to all spatial aspects of the drawing
shape, e.g. size, position. See ``check_units`` for options.
n_edges : int
Number of edges to use (must be >= 4) to approximate a circle.
fill_color : matplotlib Color | None
Color to fill with. None is transparent.
line_color : matplotlib Color | None
Color of the border line. None is transparent.
line_width : float
Line width in pixels.
Returns
-------
circle : instance of Circle
The circle object.
"""
def __init__(self, ec, radius=1, pos=(0, 0), units='norm',
n_edges=200, fill_color='white', line_color=None,
line_width=1.0):
_Triangular.__init__(self, ec, fill_color=fill_color,
line_color=line_color, line_width=line_width,
line_loop=True)
if not isinstance(n_edges, int):
raise TypeError('n_edges must be an int')
if n_edges < 4:
raise ValueError('n_edges must be >= 4 for a reasonable circle')
self._n_edges = n_edges
# construct triangulation (never changes so long as n_edges is fixed)
tris = [[0, ii + 1, ii + 2] for ii in range(n_edges)]
tris = np.concatenate(tris)
tris[-1] = 1 # fix wrap for last triangle
self._orig_tris = tris
# need to set a dummy value here so recalculation doesn't fail
self._radius = np.array([1., 1.])
self.set_pos(pos, units)
self.set_radius(radius, units)
def set_radius(self, radius, units='norm'):
"""Set the position and radius of the circle
Parameters
----------
radius : array-like | float
X- and Y-direction extents (radii) of the circle / ellipse.
A single value (float) will be replicated for both directions.
units : str
Units to use. See ``check_units`` for options.
"""
check_units(units)
radius = np.atleast_1d(radius).astype(float)
if radius.ndim != 1 or radius.size > 2:
raise ValueError('radius must be a 1- or 2-element '
'array-like vector')
if radius.size == 1:
radius = np.r_[radius, radius]
# convert to pixel (OpenGL) units
self._radius = self._ec._convert_units(radius[:, np.newaxis],
units, 'pix')[:, 0]
# need to subtract center position
ctr = self._ec._convert_units(np.zeros((2, 1)), units, 'pix')[:, 0]
self._radius -= ctr
self._recalculate()
def set_pos(self, pos, units='norm'):
"""Set the position and radius of the circle
Parameters
----------
pos : array-like
X, Y center of the circle.
units : str
Units to use. See ``check_units`` for options.
"""
check_units(units)
pos = np.array(pos, dtype=float)
if not (pos.ndim == 1 and pos.size == 2):
raise ValueError('pos must be a 2-element array-like vector')
# convert to pixel (OpenGL) units
self._pos = self._ec._convert_units(pos[:, np.newaxis],
units, 'pix')[:, 0]
self._recalculate()
def _recalculate(self):
"""Helper to recalculate point coordinates"""
edges = self._n_edges
arg = 2 * np.pi * (np.arange(edges) / float(edges))
points = np.array([self._radius[0] * np.cos(arg),
self._radius[1] * np.sin(arg)])
points = np.c_[np.zeros((2, 1)), points] # prepend the center
points += np.array(self._pos[:2], dtype=float)[:, np.newaxis]
points = points.T
self._set_fill_points(points, self._orig_tris)
self._set_line_points(points[1:]) # omit center point for lines
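# Illustrative sketch (not part of the original module): a single radius draws
# a circle, a pair of radii an ellipse. ``ec`` is assumed to be an existing
# ExperimentController.
def _example_circle(ec):
    ellipse = Circle(ec, radius=(0.3, 0.15), pos=(0, 0), units='norm',
                     fill_color='b', line_color='w', line_width=1.5)
    ellipse.draw()
    ec.flip()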
class ConcentricCircles(object):
"""A set of filled concentric circles drawn without edges.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
radii : list of float
Radii of the circles. Note that circles will be drawn in order,
so using e.g., radii=[1., 2.] will cause the first circle to be
covered by the second.
pos : array-like
2-element array-like with the X, Y center position.
units : str
Units to use. These will apply to all spatial aspects of the drawing.
See ``check_units`` for options.
colors : list or tuple of matplotlib Colors
Color to fill each circle with.
Returns
-------
circles : instance of ConcentricCircles
The concentric circles object.
"""
def __init__(self, ec, radii=(0.2, 0.05), pos=(0, 0), units='norm',
colors=('w', 'k')):
radii = np.array(radii, float)
if radii.ndim != 1:
raise ValueError('radii must be 1D')
if not isinstance(colors, (tuple, list)):
raise TypeError('colors must be a tuple, list, or array')
if len(colors) != len(radii):
raise ValueError('colors and radii must be the same length')
# create the circles; they are drawn in list order, so later circles cover earlier ones
self._circles = [Circle(ec, r, pos, units, fill_color=c, line_width=0)
for r, c in zip(radii, colors)]
def __len__(self):
return len(self._circles)
def set_pos(self, pos, units='norm'):
"""Set the position of the circles
Parameters
----------
pos : array-like
X, Y center of the circle.
units : str
Units to use. See ``check_units`` for options.
"""
for circle in self._circles:
circle.set_pos(pos, units)
def set_radius(self, radius, idx, units='norm'):
"""Set the radius of one of the circles
Parameters
----------
radius : float
Radius of the circle.
idx : int
Index of the circle.
units : str
Units to use. See ``check_units`` for options.
"""
self._circles[idx].set_radius(radius, units)
def set_radii(self, radii, units='norm'):
"""Set the color of each circle
Parameters
----------
radii : array-like
List of radii to assign to the circles. Must contain the same
number of radii as the number of circles.
units : str
Units to use. See ``check_units`` for options.
"""
radii = np.array(radii, float)
if radii.ndim != 1 or radii.size != len(self):
raise ValueError('radii must contain exactly {0} radii'
''.format(len(self)))
for idx, radius in enumerate(radii):
self.set_radius(radius, idx, units)
def set_color(self, color, idx):
"""Set the color of one of the circles
Parameters
----------
color : matplotlib Color
Color of the circle.
idx : int
Index of the circle.
"""
self._circles[idx].set_fill_color(color)
def set_colors(self, colors):
"""Set the color of each circle.
Parameters
----------
colors : list or tuple of matplotlib Colors
Must be of type list or tuple, and contain the same number of
colors as the number of circles.
"""
if not isinstance(colors, (tuple, list)) or len(colors) != len(self):
raise ValueError('colors must be a list or tuple with {0} colors'
''.format(len(self)))
for idx, color in enumerate(colors):
self.set_color(color, idx)
def draw(self):
"""Draw the fixation dot."""
for circle in self._circles:
circle.draw()
class FixationDot(ConcentricCircles):
"""A reasonable centered fixation dot.
This uses concentric circles, the inner of which has a radius of one
pixel, to create a fixation dot. If finer-grained control is desired,
consider using ``ConcentricCircles``.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
colors : list of matplotlib Colors
Color to fill the outer and inner circle with, respectively.
Returns
-------
fix : instance of FixationDot
The fixation dot.
"""
def __init__(self, ec, colors=('w', 'k')):
if len(colors) != 2:
raise ValueError('colors must have length 2')
super(FixationDot, self).__init__(ec, radii=[0.2, 0.2],
pos=[0, 0], units='deg',
colors=colors)
self.set_radius(1, 1, units='pix')
class ProgressBar(object):
"""A progress bar that can be displayed between sections.
This uses two rectangles, one outline, and one solid to show how much
progress has been made in the experiment.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
pos : array-like
4-element array-like with X, Y center and width, height where x and y
are coordinates of the box center.
units : str
Units to use. These will apply to all spatial aspects of the drawing.
Must be either ``'norm'`` or ``'pix'``.
colors : list or tuple of matplotlib Colors
Colors to fill and outline the bar respectively. Defaults to green and
white.
"""
def __init__(self, ec, pos, units='norm', colors=('g', 'w')):
self._ec = ec
if len(colors) != 2:
raise ValueError('colors must have length 2')
if units not in ['norm', 'pix']:
raise ValueError('units must be either \'norm\' or \'pix\'')
pos = np.array(pos, dtype=float)
self._pos = pos
self._width = pos[2]
self._units = units
# initialize the bar with zero progress
self._pos_bar = pos.copy()
self._pos_bar[0] -= self._width * 0.5
self._init_x = self._pos_bar[0]
self._pos_bar[2] = 0
self._rectangles = [Rectangle(ec, self._pos_bar, units, colors[0],
None),
Rectangle(ec, self._pos, units, None, colors[1])]
def update_bar(self, percent):
"""Update the progress of the bar.
Parameters
----------
percent : float
The percentage of the bar to be filled. Must be between 0 and 100.
"""
if percent > 100 or percent < 0:
raise ValueError('percent must be a float between 0 and 100')
self._pos_bar[2] = percent * self._width / 100.
self._pos_bar[0] = self._init_x + self._pos_bar[2] * 0.5
self._rectangles[0].set_pos(self._pos_bar, self._units)
def draw(self):
"""Draw the progress bar."""
for rectangle in self._rectangles:
rectangle.draw()
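# Illustrative sketch (not part of the original module): a progress bar along
# the bottom of the screen advanced to the halfway point. Note that
# ``update_bar`` expects a percentage (0-100), not a fraction. ``ec`` is
# assumed to be an existing ExperimentController.
def _example_progress_bar(ec):
    bar = ProgressBar(ec, pos=[0, -0.9, 1.5, 0.1], units='norm')
    bar.update_bar(50)
    bar.draw()
    ec.flip()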
##############################################################################
# Image display
class RawImage(object):
"""Create image from array for on-screen display.
Parameters
----------
ec : instance of ExperimentController
Parent EC.
image_buffer : array
Array, shape (N, M[, 3/4]). Color values should range between 0 and 1.
pos : array-like
2-element array-like with X, Y (center) arguments.
scale : float
The scale factor. 1 is native size (pixel-to-pixel), 2 is twice as
large, etc.
units : str
Units to use for the position. See ``check_units`` for options.
Returns
-------
img : instance of RawImage
The image object.
"""
def __init__(self, ec, image_buffer, pos=(0, 0), scale=1., units='norm'):
self._ec = ec
self._img = None
self.set_image(image_buffer)
self.set_pos(pos, units)
self.set_scale(scale)
def set_image(self, image_buffer):
"""Set image buffer data
Parameters
----------
image_buffer : array
N x M x 3 (or 4) array. Can be type ``np.float64`` or ``np.uint8``.
If ``np.float64``, color values must range between 0 and 1.
``np.uint8`` is slightly more efficient.
"""
from pyglet import image, sprite
image_buffer = np.ascontiguousarray(image_buffer)
if image_buffer.dtype not in (np.float64, np.uint8):
raise TypeError('image_buffer must be np.float64 or np.uint8')
if image_buffer.dtype == np.float64:
if image_buffer.max() > 1 or image_buffer.min() < 0:
raise ValueError('all float values must be between 0 and 1')
image_buffer = (image_buffer * 255).astype('uint8')
if image_buffer.ndim == 2: # grayscale
image_buffer = np.tile(image_buffer[..., np.newaxis], (1, 1, 3))
if not image_buffer.ndim == 3 or image_buffer.shape[2] not in [3, 4]:
raise RuntimeError('image_buffer incorrect size: {}'
''.format(image_buffer.shape))
# add alpha channel if necessary
dims = image_buffer.shape
fmt = 'RGB' if dims[2] == 3 else 'RGBA'
self._sprite = sprite.Sprite(image.ImageData(dims[1], dims[0], fmt,
image_buffer.tobytes(),
-dims[1] * dims[2]))
def set_pos(self, pos, units='norm'):
"""Set image position.
Parameters
----------
pos : array-like
2-element array-like with X, Y (center) arguments.
units : str
Units to use. See ``check_units`` for options.
"""
pos = np.array(pos, float)
if pos.ndim != 1 or pos.size != 2:
raise ValueError('pos must be a 2-element array')
pos = np.reshape(pos, (2, 1))
self._pos = self._ec._convert_units(pos, units, 'pix').ravel()
@property
def bounds(self):
"""Left, Right, Bottom, Top (in pixels) of the image."""
pos = np.array(self._pos, float)
size = np.array([self._sprite.width,
self._sprite.height], float)
bounds = np.concatenate((pos - size / 2., pos + size / 2.))
return bounds[[0, 2, 1, 3]]
@property
def scale(self):
return self._scale
def set_scale(self, scale):
"""Create image from array for on-screen display.
Parameters
----------
scale : float
The scale factor. 1 is native size (pixel-to-pixel), 2 is twice as
large, etc.
"""
scale = float(scale)
self._scale = scale
self._sprite.scale = self._scale
def draw(self):
"""Draw the image to the buffer"""
self._sprite.scale = self._scale
pos = self._pos - [self._sprite.width / 2., self._sprite.height / 2.]
try:
self._sprite.position = (pos[0], pos[1])
except AttributeError:
self._sprite.set_position(pos[0], pos[1])
self._sprite.draw()
def get_rect(self, units='norm'):
"""X, Y center, Width, Height of image.
Parameters
----------
units : str
Units to use for the position. See ``check_units`` for options.
Returns
-------
rect : ndarray
The rect.
"""
# left,right,bottom,top
lrbt = self._ec._convert_units(self.bounds.reshape(2, -1),
fro='pix', to=units)
center = self._ec._convert_units(self._pos.reshape(2, -1),
fro='pix', to=units)
width_height = np.diff(lrbt, axis=-1)
return np.squeeze(np.concatenate([center, width_height]))
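# Illustrative sketch (not part of the original module): displaying a random
# grayscale array at twice its native pixel size. ``ec`` is assumed to be an
# existing ExperimentController.
def _example_raw_image(ec):
    img = RawImage(ec, np.random.rand(64, 64), pos=(0, 0), scale=2.)
    img.draw()
    ec.flip()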
class Video(object):
"""Read video file and draw it to the screen.
Parameters
----------
ec : instance of expyfun.ExperimentController
file_name : str
the video file path
pos : array-like
2-element array-like with X, Y elements.
units : str
Units to use for the position. See ``check_units`` for options.
scale : float | str
The scale factor. 1 is native size (pixel-to-pixel), 2 is twice as
large, etc. If scale is a string, it must be either ``'fill'``
(which ensures the entire ``ExperimentController`` window is
covered by the video, at the expense of some parts of the video
potentially being offscreen), or ``'fit'`` (which scales maximally
while ensuring none of the video is offscreen, and may result in
letterboxing or pillarboxing).
center : bool
If ``False``, the elements of ``pos`` specify the position of the lower
left corner of the video frame; otherwise they position the center of
the frame.
visible : bool
Whether to show the video when initialized. Can be toggled later using
`Video.set_visible` method.
Returns
-------
None
Notes
-----
This is a somewhat pared-down implementation of video playback. Looping is
not available, and the audio stream from the video file is discarded.
Timing of individual frames is relegated to the pyglet media player's
internal clock. Recommended for use only in paradigms where the relative
timing of audio and video are unimportant (e.g., if the video is merely
entertainment for the participant during a passive auditory task).
"""
def __init__(self, ec, file_name, pos=(0, 0), units='norm', scale=1.,
center=True, visible=True):
from pyglet.media import load, Player
self._ec = ec
self._source = load(file_name)
self._player = Player()
with warnings.catch_warnings(record=True): # deprecated eos_action
self._player.queue(self._source)
self._player._audio_player = None
frame_rate = self.frame_rate
if frame_rate is None:
logger.warning('Frame rate could not be determined')
frame_rate = 60.
self._dt = 1. / frame_rate
self._texture = None
self._playing = False
self._finished = False
self._pos = pos
self._units = units
self._center = center
self.set_scale(scale) # also calls set_pos
self._visible = visible
self._eos_fun = self._eos_new if _new_pyglet() else self._eos_old
def play(self, auto_draw=True):
"""Play video from current position.
Parameters
----------
auto_draw : bool
If True, add ``self.draw`` to ``ec.on_every_flip``.
Returns
-------
time : float
The timestamp (on the parent ``ExperimentController`` timeline) at
which ``play()`` was called.
"""
if not self._playing:
if auto_draw:
self._ec.call_on_every_flip(self.draw)
self._player.play()
self._playing = True
else:
warnings.warn('ExperimentController.video.play() called when '
'already playing.')
return self._ec.get_time()
def pause(self):
"""Halt video playback.
Returns
-------
time : float
The timestamp (on the parent ``ExperimentController`` timeline) at
which ``pause()`` was called.
"""
if self._playing:
try:
idx = self._ec.on_every_flip_functions.index(self.draw)
except ValueError: # not auto_draw
pass
else:
self._ec.on_every_flip_functions.pop(idx)
self._player.pause()
self._playing = False
else:
warnings.warn('ExperimentController.video.pause() called when '
'already paused.')
return self._ec.get_time()
def _delete(self):
"""Halt video playback and remove player."""
if self._playing:
self.pause()
self._player.delete()
def _scale_texture(self):
if self._texture:
self._texture.width = self.source_width * self._scale
self._texture.height = self.source_height * self._scale
def set_scale(self, scale=1.):
"""Set video scale.
Parameters
----------
scale : float | str
The scale factor. 1 is native size (pixel-to-pixel), 2 is twice as
large, etc. If scale is a string, it must be either ``'fill'``
(which ensures the entire ``ExperimentController`` window is
covered by the video, at the expense of some parts of the video
potentially being offscreen), or ``'fit'`` (which scales maximally
while ensuring none of the video is offscreen, which may result in
letterboxing).
"""
if isinstance(scale, string_types):
_scale = self._ec.window_size_pix / np.array((self.source_width,
self.source_height),
dtype=float)
if scale == 'fit':
scale = _scale.min()
elif scale == 'fill':
scale = _scale.max()
self._scale = float(scale) # allows [1, 1., '1']; others: ValueError
if self._scale <= 0:
raise ValueError('Video scale factor must be strictly positive.')
self._scale_texture()
self.set_pos(self._pos, self._units, self._center)
def set_pos(self, pos, units='norm', center=True):
"""Set video position.
Parameters
----------
pos : array-like
2-element array-like with X, Y elements.
units : str
Units to use for the position. See ``check_units`` for options.
center : bool
If ``False``, the elements of ``pos`` specify the position of the
lower left corner of the video frame; otherwise they position the
center of the frame.
"""
pos = np.array(pos, float)
if pos.size != 2:
raise ValueError('pos must be a 2-element array')
pos = np.reshape(pos, (2, 1))
pix = self._ec._convert_units(pos, units, 'pix').ravel()
offset = np.array((self.width, self.height)) // 2 if center else 0
self._pos = pos
self._actual_pos = pix - offset
self._pos_unit = units
self._pos_centered = center
def _draw(self):
self._texture = self._player.get_texture()
self._scale_texture()
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
self._texture.blit(*self._actual_pos)
def draw(self):
"""Draw the video texture to the screen buffer."""
self._player.update_texture()
# detect end-of-stream to prevent pyglet from hanging:
if not self._eos:
if self._visible:
self._draw()
else:
self._finished = True
self.pause()
self._ec.check_force_quit()
def set_visible(self, show, flip=False):
"""Show/hide the video frame.
Parameters
----------
show : bool
Show or hide.
flip : bool
If True, flip after showing or hiding.
"""
if show:
self._visible = True
self._draw()
else:
self._visible = False
self._ec.flip()
if flip:
self._ec.flip()
# PROPERTIES
@property
def _eos(self):
return self._eos_fun()
def _eos_old(self):
return (self._player._last_video_timestamp is not None and
self._player._last_video_timestamp ==
self._source.get_next_video_timestamp())
def _eos_new(self):
ts = self._source.get_next_video_timestamp()
dur = self._source._duration
return ts is None or ts >= dur
@property
def playing(self):
return self._playing
@property
def finished(self):
return self._finished
@property
def position(self):
return np.squeeze(self._pos)
@property
def scale(self):
return self._scale
@property
def duration(self):
return self._source.duration
@property
def frame_rate(self):
return self._source.video_format.frame_rate
@property
def dt(self):
return self._dt
@property
def time(self):
return self._player.time
@property
def width(self):
return self.source_width * self._scale
@property
def height(self):
return self.source_height * self._scale
@property
def source_width(self):
return self._source.video_format.width
@property
def source_height(self):
return self._source.video_format.height
@property
def time_offset(self):
return self._ec.get_time() - self._player.time
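# Illustrative sketch (not part of the original module): playing a clip scaled
# to fit the window. ``ec`` is assumed to be an existing ExperimentController
# and 'clip.mp4' is a placeholder file path.
def _example_video(ec):
    video = Video(ec, 'clip.mp4', scale='fit')
    video.play()  # adds video.draw to ec's flip callbacks
    while not video.finished:
        ec.flip()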
| bsd-3-clause |
pratapvardhan/scikit-image | skimage/transform/tests/test_radon_transform.py | 13 | 14551 | from __future__ import print_function, division
import numpy as np
from numpy.testing import assert_raises
import itertools
import os.path
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage.io import imread
from skimage import data_dir
from skimage._shared.testing import test_parallel
from skimage._shared._warnings import expected_warnings
PHANTOM = imread(os.path.join(data_dir, "phantom.png"),
as_grey=True)[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def check_radon_center(shape, circle):
# Create a test image with only a single non-zero pixel at the origin
image = np.zeros(shape, dtype=np.float)
image[(shape[0] // 2, shape[1] // 2)] = 1.
# Calculate the sinogram
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle)
# The sinogram should be a straight, horizontal line
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
def test_radon_center():
shapes = [(16, 16), (17, 17)]
circles = [False, True]
for shape, circle in itertools.product(shapes, circles):
yield check_radon_center, shape, circle
rectangular_shapes = [(32, 16), (33, 17)]
for shape in rectangular_shapes:
yield check_radon_center, shape, False
def check_iradon_center(size, theta, circle):
debug = False
# Create a test sinogram corresponding to a single projection
# with a single non-zero pixel at the rotation center
if circle:
sinogram = np.zeros((size, 1), dtype=np.float)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
sinogram = np.zeros((diagonal, 1), dtype=np.float)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
# Compare reconstructions for theta=angle and theta=angle + 180;
# these should be exactly equal
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
def test_iradon_center():
sizes = [16, 17]
thetas = [0, 90]
circles = [False, True]
for size, theta, circle in itertools.product(sizes, thetas, circles):
yield check_iradon_center, size, theta, circle
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image), filter=filter_type,
interpolation=interpolation_type)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
def test_radon_iradon():
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
for interpolation_type, filter_type in \
itertools.product(interpolation_types, filter_types):
yield check_radon_iradon, interpolation_type, filter_type
# cubic interpolation is slow; only run one test for it
yield check_radon_iradon, 'cubic', 'shepp-logan'
def test_iradon_angles():
"""
Test with different number of projections
"""
size = 100
# Synthetic data
image = np.tri(size) + np.tri(size)[::-1]
# Large number of projections: a good quality is expected
nb_angles = 200
radon_image_200 = radon(image, theta=np.linspace(0, 180, nb_angles,
endpoint=False))
reconstructed = iradon(radon_image_200)
delta_200 = np.mean(abs(_rescale_intensity(image) - _rescale_intensity(reconstructed)))
assert delta_200 < 0.03
# Lower number of projections
nb_angles = 80
radon_image_80 = radon(image, theta=np.linspace(0, 180, nb_angles,
endpoint=False))
# Test whether the sum of all projections is approximately the same
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
# Loss of quality when the number of projections is reduced
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
image = np.zeros(shape, dtype=np.float)
image[slices] = 1.
sinogram = radon(image, theta)
reconstructed = iradon(sinogram, theta)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
def test_radon_iradon_minimal():
shapes = [(3, 3), (4, 4), (5, 5)]
for shape in shapes:
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
for coordinate in coordinates:
yield check_radon_iradon_minimal, shape, coordinate
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2])
iradon(p, theta=[0, 1, 2])
assert_raises(ValueError, iradon, p, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
a = np.ones((10, 10))
with expected_warnings(['reconstruction circle']):
radon(a, circle=True)
# Synthetic data, circular symmetry
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
# Synthetic data, random
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
argmax_shape = lambda a: np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square)
== argmax_shape(sinogram_circle_to_square))
def test_sinogram_circle_to_square():
for size in (50, 51):
yield check_sinogram_circle_to_square, size
def check_radon_iradon_circle(interpolation, shape, output_size):
# Forward and inverse radon on synthetic data
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
# Crop rectangular reconstruction to match circle=True reconstruction
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
# Find the reconstruction circle, set reconstruction to zero outside
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
np.allclose(reconstruction_rectangle, reconstruction_circle)
def test_radon_iradon_circle():
shape = (61, 79)
interpolations = ('nearest', 'linear')
output_sizes = (None, min(shape), max(shape), 97)
for interpolation, output_size in itertools.product(interpolations,
output_sizes):
yield check_radon_iradon_circle, interpolation, shape, output_size
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
for l in lengths:
theta_ordered = np.linspace(0, 180, l, endpoint=False)
theta_random = np.random.uniform(0, 180, l)
for theta in (theta_random, theta_ordered):
indices = [x for x in order_angles_golden_ratio(theta)]
# no duplicate indices allowed
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8)
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
sinogram_shifted = np.vstack(np.interp(x + shifts[i], x,
sinogram[:, i])
for i in range(sinogram.shape[1])).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
| bsd-3-clause |
agiovann/CalBlitz | calblitz/granule_cells/utils_granule.py | 1 | 44647 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 16 17:56:14 2016
@author: agiovann
"""
import os
import cv2
import h5py
import numpy as np
import pylab as pl
from glob import glob
import ca_source_extraction as cse  # used below (e.g. mode_robust, extract_binary_masks_blob)
import calblitz as cb
from scipy import signal
import scipy
import sys
from ipyparallel import Client
from time import time
from scipy.sparse import csc,csr,coo_matrix
from scipy.spatial.distance import cdist
from scipy import ndimage
from scipy.optimize import linear_sum_assignment
# from sklearn.utils.linear_assignment_ import linear_assignment  # unused; scipy's linear_sum_assignment is used instead
import re
import pickle
#%% Process triggers
def extract_triggers(file_list,read_dictionaries=False):
"""Extract triggers from Bens' tiff file and create readable dictionaries
Parameterskdkd
-----------
file_list: list of tif files or npz files containing the iage description
Returns
-------
triggers: list
[idx_CS, idx_US, trial_type, number_of_frames]. Trial types: 0 CS alone, 1 US alone, 2 CS US
trigger_names: list
file name associated (without extension)
Example:
fls=glob.glob('2016*.tif')
fls.sort()
triggers,trigger_names=extract_triggers(fls[:5],read_dictionaries=False)
np.savez('all_triggers.npz',triggers=triggers,trigger_names=trigger_names)
"""
triggers=[]
trigger_names=[]
for fl in file_list:
print fl
fn=fl[:-4]+'_ImgDescr.npz'
if read_dictionaries:
with np.load(fn) as idr:
image_descriptions=idr['image_descriptions']
else:
image_descriptions=cb.utils.get_image_description_SI(fl)
print '*****************'
np.savez(fn,image_descriptions=image_descriptions)
trig_vect=np.zeros(4)*np.nan
for idx,image_description in enumerate(image_descriptions):
i2cd=image_description['I2CData']
if type(i2cd) is str:
if i2cd.find('US_ON')>=0:
trig_vect[1]=image_description['frameNumberAcquisition']-1
if i2cd.find('CS_ON')>=0:
trig_vect[0]=image_description['frameNumberAcquisition']-1
if np.nansum(trig_vect>0)==2:
trig_vect[2]=2
elif trig_vect[0]>0:
trig_vect[2]=0
elif trig_vect[1]>0:
trig_vect[2]=1
else:
raise Exception('No triggers present in trial')
trig_vect[3]=idx+1
triggers.append(trig_vect)
trigger_names.append(fl[:-4])
print triggers[-1]
return triggers,trigger_names
#%%
def downsample_triggers(triggers,fraction_downsample=1):
""" downample triggers so as to make them in line with the movies
Parameters
----------
triggers: list=Ftraces[idx]
output of extract_triggers function
fraction_downsample: float
fraction the data is shrinked in the time axis
"""
triggers[:,[0,1,3]]=np.round(triggers[:,[0,1,3]]*fraction_downsample)
# triggers[-1,[0,1,3]]=np.floor(triggers[-1,[0,1,3]]*fraction_downsample)
# triggers[-1]=np.cumsum(triggers[-1])
# real_triggers=triggers[:-1]+np.concatenate([np.atleast_1d(0), triggers[-1,:-1]])[np.newaxis,:]
#
# trg=real_triggers[1][triggers[-2]==2]+np.arange(-5,8)[:,np.newaxis]
#
# trg=np.int64(trg)
return triggers
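# Example (illustrative sketch, hypothetical file names): downsampling triggers
# to match a movie that was temporally resized by a factor of 0.5.
#
#   triggers, trigger_names = extract_triggers(glob('2016*.tif'), read_dictionaries=True)
#   triggers = np.array(triggers)
#   triggers = downsample_triggers(triggers, fraction_downsample=0.5)
#
# downsample_triggers expects a 2D array (trials x 4) and rescales the CS index,
# US index and number-of-frames columns.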
#%%
def get_behavior_traces(fname,t0,t1,freq,ISI,draw_rois=False,plot_traces=False,mov_filt_1d=True,window_hp=201,window_lp=3,interpolate=True,EXPECTED_ISI=.25):
"""
From hdf5 movies extract eyelid closure and wheel movement
Parameters
----------
fname: str
file name of the hdf5 file
t0,t1: float.
Times of beginning and end of trials (in general 0 and 8 for our dataset) to build the absolute time vector
freq: float
frequency used to build the final time vector
ISI: float
inter-stimulus interval
draw_rois: bool
whether to manually draw the eyelid contour
plot_traces: bool
whether to plot the traces during extraction
mov_filt_1d: bool
whether to filter the movie after extracting the average or ROIs. The alternative is a 3D filter that can be very computationally expensive
window_lp, window_hp: ints
number of frames to be used to median filter the data. It is needed because of the light IR artifact coming out of the eye
Returns
-------
res: dict
dictionary with fields
'eyelid': eyelid trace
'wheel': wheel trace
'time': absolute time vector
'trials': corresponding indexes of the trials
'trial_info': for each trial it returns start trial, end trial, time CS, time US, trial type (CS:0 US:1 CS+US:2)
'idx_CS_US': idx trial CS US
'idx_US': idx trial US
'idx_CS': idx trial CS
"""
CS_ALONE=0
US_ALONE=1
CS_US=2
meta_inf = fname[:-7]+'data.h5'
time_abs=np.linspace(t0,t1,freq*(t1-t0))
T=len(time_abs)
t_us=0
t_cs=0
n_samples_ISI=np.int(ISI*freq)
t_uss=[]
ISIs=[]
eye_traces=[]
wheel_traces=[]
trial_info=[]
tims=[]
with h5py.File(fname) as f:
with h5py.File(meta_inf) as dt:
rois=np.asarray(dt['roi'],np.float32)
trials = f.keys()
trials.sort(key=lambda(x): np.int(x.replace('trial_','')))
trials_idx=[np.int(x.replace('trial_',''))-1 for x in trials]
trials_idx_=[]
for tr,idx_tr in zip(trials[:],trials_idx[:]):
if plot_traces:
pl.cla()
print tr
trial=f[tr]
mov=np.asarray(trial['mov'])
if draw_rois:
pl.imshow(np.mean(mov,0))
pl.xlabel('Draw eye')
pts=pl.ginput(-1)
pts = np.asarray(pts, dtype=np.int32)
data = np.zeros(np.shape(mov)[1:], dtype=np.int32)
# if CV_VERSION == 2:
#lt = cv2.CV_AA
# elif CV_VERSION == 3:
lt = cv2.LINE_AA
cv2.fillConvexPoly(data, pts, (1,1,1), lineType=lt)
rois[0]=data
pl.close()
pl.imshow(np.mean(mov,0))
pl.xlabel('Draw wheel')
pts=pl.ginput(-1)
pts = np.asarray(pts, dtype=np.int32)
data = np.zeros(np.shape(mov)[1:], dtype=np.int32)
# if CV_VERSION == 2:
#lt = cv2.CV_AA
# elif CV_VERSION == 3:
lt = cv2.LINE_AA
cv2.fillConvexPoly(data, pts, (1,1,1), lineType=lt)
rois[1]=data
pl.close()
# eye_trace=np.mean(mov*rois[0],axis=(1,2))
# mov_trace=np.mean((np.diff(np.asarray(mov,dtype=np.float32),axis=0)**2)*rois[1],axis=(1,2))
mov=np.transpose(mov,[0,2,1])
mov=mov[:,:,::-1]
if mov.shape[0]>0:
ts=np.array(trial['ts'])
if np.size(ts)>0:
assert np.std(np.diff(ts))<0.005, 'Time stamps of behaviour are unreliable'
if interpolate:
new_ts=np.linspace(0,ts[-1,0]-ts[0,0],np.shape(mov)[0])
if dt['trials'][idx_tr,-1] == US_ALONE:
t_us=np.maximum(t_us,dt['trials'][idx_tr,3]-dt['trials'][idx_tr,0])
mmm=mov[:n_samples_ISI].copy()
mov=mov[:-n_samples_ISI]
mov=np.concatenate([mmm,mov])
elif dt['trials'][idx_tr,-1] == CS_US:
t_cs=np.maximum(t_cs,dt['trials'][idx_tr,2]-dt['trials'][idx_tr,0])
t_us=np.maximum(t_us,dt['trials'][idx_tr,3]-dt['trials'][idx_tr,0])
t_uss.append(t_us)
ISI=t_us-t_cs
ISIs.append(ISI)
n_samples_ISI=np.int(ISI*freq)
else:
t_cs=np.maximum(t_cs,dt['trials'][idx_tr,2]-dt['trials'][idx_tr,0])
new_ts=new_ts
tims.append(new_ts)
else:
start,end,t_CS,t_US= dt['trials'][idx_tr,:-1]-dt['trials'][idx_tr,0]
f_rate=np.median(np.diff(ts[:,0]))
ISI=t_US-t_CS
idx_US=np.int(t_US/f_rate)
idx_CS=np.int(t_CS/f_rate)
fr_before_US=np.int((t_US - start -.1)/f_rate)
fr_after_US=np.int((end -.1 - t_US)/f_rate)
idx_abs=np.arange(-fr_before_US,fr_after_US)
time_abs=idx_abs*f_rate
assert np.abs(ISI-EXPECTED_ISI)<.01, str(np.abs(ISI-EXPECTED_ISI)) + ': the distance from CS to US is different from what was expected'
# trig_US=
# new_ts=
mov_e=cb.movie(mov*rois[0][::-1].T,fr=1/np.mean(np.diff(new_ts)))
mov_w=cb.movie(mov*rois[1][::-1].T,fr=1/np.mean(np.diff(new_ts)))
x_max_w,y_max_w=np.max(np.nonzero(np.max(mov_w,0)),1)
x_min_w,y_min_w=np.min(np.nonzero(np.max(mov_w,0)),1)
x_max_e,y_max_e=np.max(np.nonzero(np.max(mov_e,0)),1)
x_min_e,y_min_e=np.min(np.nonzero(np.max(mov_e,0)),1)
mov_e=mov_e[:,x_min_e:x_max_e,y_min_e:y_max_e]
mov_w=mov_w[:,x_min_w:x_max_w,y_min_w:y_max_w]
# mpart=mov[:20].copy()
# md=cse.utilities.mode_robust(mpart.flatten())
# N=np.sum(mpart<=md)
# mpart[mpart>md]=md
# mpart[mpart==0]=md
# mpart=mpart-md
# std=np.sqrt(np.sum(mpart**2)/N)
# thr=md+10*std
#
# thr=np.minimum(255,thr)
# return mov
if mov_filt_1d:
mov_e=np.mean(mov_e, axis=(1,2))
window_hp_=window_hp
window_lp_=window_lp
if plot_traces:
pl.plot((mov_e-np.mean(mov_e))/(np.max(mov_e)-np.min(mov_e)))
else:
window_hp_=(window_hp,1,1)
window_lp_=(window_lp,1,1)
bl=signal.medfilt(mov_e,window_hp_)
mov_e=signal.medfilt(mov_e-bl,window_lp_)
if mov_filt_1d:
eye_=np.atleast_2d(mov_e)
else:
eye_=np.atleast_2d(np.mean(mov_e, axis=(1,2)))
wheel_=np.concatenate([np.atleast_1d(0),np.nanmean(np.diff(mov_w,axis=0)**2,axis=(1,2))])
if np.abs(new_ts[-1] - time_abs[-1])>1:
raise Exception('Time duration is significantly larger or smaller than reference time')
wheel_=np.squeeze(wheel_)
eye_=np.squeeze(eye_)
f1=scipy.interpolate.interp1d(new_ts , eye_,bounds_error=False,kind='linear')
eye_=np.array(f1(time_abs))
f1=scipy.interpolate.interp1d(new_ts , wheel_,bounds_error=False,kind='linear')
wheel_=np.array(f1(time_abs))
if plot_traces:
pl.plot( (eye_) / (np.nanmax(eye_)-np.nanmin(eye_)),'r')
pl.plot( (wheel_ -np.nanmin(wheel_))/ np.nanmax(wheel_),'k')
pl.pause(.01)
trials_idx_.append(idx_tr)
eye_traces.append(eye_)
wheel_traces.append(wheel_)
trial_info.append(dt['trials'][idx_tr,:])
res=dict()
res['eyelid'] = eye_traces
res['wheel'] = wheel_traces
res['time'] = time_abs - np.median(t_uss)
res['trials'] = trials_idx_
res['trial_info'] = trial_info
res['idx_CS_US'] = np.where(map(int,np.array(trial_info)[:,-1]==CS_US))[0]
res['idx_US'] = np.where(map(int,np.array(trial_info)[:,-1]==US_ALONE))[0]
res['idx_CS'] = np.where(map(int,np.array(trial_info)[:,-1]==CS_ALONE))[0]
return res
#%%
def process_eyelid_traces(traces,time_vect,idx_CS_US,idx_US,idx_CS,thresh_CR=.1,time_CR_on=-.1,time_US_on=.05):
"""
preprocess traces output of get_behavior_traces
Parameters:
----------
traces: ndarray (N trials X t time points)
eyelid traces output of get_behavior_traces.
thresh_CR: float
fraction of eyelid closure considered a CR
time_CR_on: float
time of alleged beginning of CRs
time_US_on: float
time when the US is considered to induce a UR
Returns:
-------
eye_traces: ndarray
normalized eyelid traces
trigs: dict
dictionary containing various subdivision of the triggers according to behavioral responses
'idxCSUSCR': index of trials with CS+US with CR
'idxCSUSNOCR': index of trials with CS+US without CR
'idxCSCR':
'idxCSNOCR':
'idxNOCR': index of trials with no CRs
'idxCR': index of trials with CRs
'idxUS':
"""
#normalize by max amplitudes at US
eye_traces=traces/np.nanmax(np.nanmedian(traces[np.hstack([idx_CS_US,idx_US])][:,np.logical_and(time_vect>time_US_on,time_vect<time_US_on +.4 )],0))
amplitudes_at_US=np.mean(eye_traces[:,np.logical_and( time_vect > time_CR_on , time_vect <= time_US_on )],1)
trigs=dict()
trigs['idxCSUSCR']=idx_CS_US[np.where(amplitudes_at_US[idx_CS_US]>thresh_CR)[-1]]
trigs['idxCSUSNOCR']=idx_CS_US[np.where(amplitudes_at_US[idx_CS_US]<thresh_CR)[-1]]
trigs['idxCSCR']=idx_CS[np.where(amplitudes_at_US[idx_CS]>thresh_CR)[-1]]
trigs['idxCSNOCR']=idx_CS[np.where(amplitudes_at_US[idx_CS]<thresh_CR)[-1]]
trigs['idxNOCR']=np.union1d(trigs['idxCSUSNOCR'],trigs['idxCSNOCR'])
trigs['idxCR']=np.union1d(trigs['idxCSUSCR'],trigs['idxCSCR'])
trigs['idxUS']=idx_US
return eye_traces,amplitudes_at_US, trigs
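# Example (illustrative sketch, hypothetical file name): extracting behavior and
# classifying trials by the presence of a conditioned response (CR).
#
#   res = get_behavior_traces('20160616103931_cam2.h5', t0=0, t1=8, freq=60,
#                             ISI=.25, draw_rois=False, plot_traces=False)
#   eye, ampl_US, trigs = process_eyelid_traces(np.array(res['eyelid']), res['time'],
#                                               res['idx_CS_US'], res['idx_US'],
#                                               res['idx_CS'], thresh_CR=.1)
#   print len(trigs['idxCR']), 'trials with a CR'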
#%%
def process_wheel_traces(traces,time_vect,thresh_MOV_iqr=3,time_CS_on=-.25,time_US_on=0):
tmp = traces[:,time_vect<time_CS_on]
wheel_traces=traces/(np.percentile(tmp,75)-np.percentile(tmp,25))
movement_at_CS=np.max(wheel_traces[:,np.logical_and( time_vect > time_CS_on, time_vect <= time_US_on )],1)
trigs=dict()
trigs['idxMOV']=np.where(movement_at_CS>thresh_MOV_iqr)[-1]
trigs['idxNO_MOV']=np.where(movement_at_CS<thresh_MOV_iqr)[-1]
return wheel_traces, movement_at_CS, trigs
#%%
def process_wheel_traces_talmo(wheel_mms_TM_,timestamps_TM_,tm,thresh_MOV=.2,time_CS_on=-.25,time_US_on=0):
wheel_traces=[]
for tr_,tm_ in zip(wheel_mms_TM_,timestamps_TM_):
if len(tm_)<len(tm):
#print ['Adjusting the samples:',len(tm)-len(tm_)]
wheel_traces.append(np.pad(tr_,(0,len(tm)-len(tm_)),mode='edge'))
elif len(tm_)>len(tm):
wheel_traces.append(tr_[len(tm_)-len(tm):])
#print ['Removing the samples:',len(tm)-len(tm_)]
else:
wheel_traces.append(tr_)
# wheel_traces=np.abs(np.array(wheel_traces))/10 # to cm
# tmp = traces[:,time_vect<time_CS_on]
wheel_traces=np.abs(np.array(wheel_traces))
# wheel_traces=traces/(np.percentile(tmp,75)-np.percentile(tmp,25))
movement_at_CS=np.max(wheel_traces[:,np.logical_and( tm > time_CS_on, tm <= time_US_on )],1)
trigs=dict()
trigs['idxMOV']=np.where(movement_at_CS>thresh_MOV)[-1]
trigs['idxNO_MOV']=np.where(movement_at_CS<thresh_MOV)[-1]
return wheel_traces, movement_at_CS, trigs
#%%
def load_results(f_results):
"""
Load results from CNMF on various FOVs and merge them after some preprocessing
"""
# load data
i=0
A_s=[]
C_s=[]
YrA_s=[]
Cn_s=[]
shape = None
b_s=[]
f_s=[]
for f_res in f_results:
print f_res
i+=1
with np.load(f_res) as ld:
A_s.append(csc.csc_matrix(ld['A2']))
C_s.append(ld['C2'])
YrA_s.append(ld['YrA'])
Cn_s.append(ld['Cn'])
b_s.append(ld['b2'])
f_s.append(ld['f2'])
if shape is not None:
shape_new=(ld['d1'],ld['d2'])
if shape_new != shape:
raise Exception('Shapes of FOVs not matching')
else:
shape = shape_new
else:
shape=(ld['d1'],ld['d2'])
return A_s,C_s,YrA_s, Cn_s, b_s, f_s, shape
#%% threshold and remove spurious components
def threshold_components(A_s,shape,min_size=5,max_size=np.inf,max_perc=.5,remove_unconnected_components=True):
"""
Threshold components output of a CNMF algorithm (A matrices)
Parameters:
----------
A_s: list
list of A matrice output from CNMF
min_size: int
min size of the component in pixels
max_size: int
max size of the component in pixels
max_perc: float
fraction of the maximum of each component used to threshold
remove_unconnected_components: boolean
whether to remove components that are fragmented in space
Returns:
-------
B_s: list of the thresholded components
lab_imgs: image representing the components in ndimage format
cm_s: center of masses of each components
"""
B_s=[]
lab_imgs=[]
cm_s=[]
for A_ in A_s:
print '*'
max_comps=A_.max(0).todense().T
tmp=[]
cm=[]
lim=np.zeros(shape)
for idx,a in enumerate(A_.T):
#create mask by thresholding to 50% of the max
mask=np.reshape(a.todense()>(max_comps[idx]*max_perc),shape)
label_im, nb_labels = ndimage.label(mask)
sizes = ndimage.sum(mask, label_im, range(nb_labels + 1))
if remove_unconnected_components:
l_largest=(label_im==np.argmax(sizes))
cm.append(scipy.ndimage.measurements.center_of_mass(l_largest,l_largest))
lim[l_largest] = (idx+1)
# #remove connected components that are too small
mask_size=np.logical_or(sizes<min_size,sizes>max_size)
if np.sum(mask_size[1:])>1:
print 'removing ' + str( np.sum(mask_size[1:])-1) + ' components'
remove_pixel=mask_size[label_im]
label_im[remove_pixel] = 0
label_im=(label_im>0)*1
tmp.append(label_im.flatten())
cm_s.append(cm)
lab_imgs.append(lim)
B_s.append(csc.csc_matrix(np.array(tmp)).T)
return B_s, lab_imgs, cm_s
#%% compute mask distances
def distance_masks(M_s,cm_s,max_dist):
"""
Compute distance matrices based on an intersection-over-union metric. Matrices are compared in order, with matrix i compared with matrix i+1
Parameters
----------
M_s: list of ndarrays
The thresholded A matrices (masks) to compare, output of threshold_components
cm_s: list of list of 2-ples
the centroids of the components in each M_s
max_dist: float
maximum distance allowed between the centroids of two components. This corresponds to a distance at which two components are surely disjoint
Returns:
--------
D_s: list of matrix distances
"""
D_s=[]
for M1,M2,cm1,cm2 in zip(M_s[:-1],M_s[1:],cm_s[:-1],cm_s[1:]):
print 'New Pair **'
M1= M1.copy()[:,:]
M2= M2.copy()[:,:]
d_1=np.shape(M1)[-1]
d_2=np.shape(M2)[-1]
D = np.ones((d_1,d_2));
cm1=np.array(cm1)
cm2=np.array(cm2)
for i in range(d_1):
if i%100==0:
print i
k=M1[:,np.repeat(i,d_2)]+M2
# h=M1[:,np.repeat(i,d_2)].copy()
# h.multiply(M2)
for j in range(d_2):
dist = np.linalg.norm(cm1[i]-cm2[j])
if dist<max_dist:
union = k[:,j].sum()
# intersection = h[:,j].nnz
intersection= np.array(M1[:,i].T.dot(M2[:,j]).todense()).squeeze()
## intersect= np.sum(np.logical_xor(M1[:,i],M2[:,j]))
## union=np.sum(np.logical_or(M1[:,i],M2[:,j]))
if union > 0:
D[i,j] = 1-1.*intersection/(union-intersection)
else:
# print 'empty component: setting distance to max'
D[i,j] = 1.
if np.isnan(D[i,j]):
raise Exception('Nan value produced. Error in inputs')
else:
D[i,j] = 1
D_s.append(D)
return D_s
#%% find matches
def find_matches(D_s, print_assignment=False):
matches=[]
costs=[]
t_start=time()
for ii,D in enumerate(D_s):
DD=D.copy()
if np.sum(np.where(np.isnan(DD)))>0:
raise Exception('Distance Matrix contains NaN, not allowed!')
# indexes = m.compute(DD)
# indexes = linear_assignment(DD)
indexes = linear_sum_assignment(DD)
indexes2=[(ind1,ind2) for ind1,ind2 in zip(indexes[0],indexes[1])]
matches.append(indexes)
DD=D.copy()
total = []
for row, column in indexes2:
value = DD[row,column]
if print_assignment:
print '(%d, %d) -> %f' % (row, column, value)
total.append(value)
print 'FOV: %d, shape: %d,%d total cost: %f' % (ii, DD.shape[0],DD.shape[1], np.sum(total))
print time()-t_start
costs.append(total)
return matches,costs
#%%
def link_neurons(matches,costs,max_cost=0.6,min_FOV_present=None):
"""
Link neurons from different FOVs given matches and costs obtained from the Hungarian algorithm
Parameters
----------
matches: lists of list of tuple
output of the find_matches function
costs: list of lists of scalars
cost associated to each match in matches
max_cost: float
maximum allowed value of the 1- intersection over union metric
min_FOV_present: int
number of FOVs that must consecutively contain the neuron, starting from 0. If None,
the neuron must be present in each FOV
Returns:
--------
neurons: list of arrays representing the indices of neurons in each FOV
"""
if min_FOV_present is None:
min_FOV_present=len(matches)
neurons=[]
num_neurons=0
# Yr_tot=[]
num_chunks=len(matches)+1
for idx in range(len(matches[0][0])):
neuron=[]
neuron.append(idx)
# Yr=YrA_s[0][idx]+C_s[0][idx]
for match,cost,chk in zip(matches,costs,range(1,num_chunks)):
rows,cols=match
m_neur=np.where(rows==neuron[-1])[0].squeeze()
if m_neur.size > 0:
if cost[m_neur]<=max_cost:
neuron.append(cols[m_neur])
# Yr=np.hstack([Yr,YrA_s[chk][idx]+C_s[chk][idx]])
else:
break
else:
break
if len(neuron)>min_FOV_present:
num_neurons+=1
neurons.append(neuron)
# Yr_tot.append(Yr)
neurons=np.array(neurons).T
print 'num_neurons:' + str(num_neurons)
# Yr_tot=np.array(Yr_tot)
return neurons
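# Example (illustrative sketch, hypothetical file names): registering components
# across chunks/FOVs with the functions defined above.
#
#   f_results = glob('*results_analysis.npz'); f_results.sort()
#   A_s, C_s, YrA_s, Cn_s, b_s, f_s, shape = load_results(f_results)
#   B_s, lab_imgs, cm_s = threshold_components(A_s, shape, min_size=5,
#                                              max_size=50, max_perc=.5)
#   D_s = distance_masks(B_s, cm_s, max_dist=10)
#   matches, costs = find_matches(D_s)
#   neurons = link_neurons(matches, costs, max_cost=0.6)
#   # row i of `neurons` gives, for chunk i, the component index of each linked neuron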
#%%
def generate_linked_traces(mov_names,chunk_sizes,A,b,f):
"""
Generate traces (DFF, BL and DF) for a group of movies that share the same A, b and f,
by applying the same transformation to each movie. This removes
the contamination from neuropil and then masks the components.
Parameters:
-----------
mov_names: list of paths to movies associated with the same A, b and f
chunk_sizes:list containing the number of frames in each movie
A,b and f: from CNMF
Returns:
--------
"""
num_chunks=np.sum(chunk_sizes)
# A = A_s[idx][:,neurons[idx]]
nA = (A.power(2)).sum(0)
# bckg=cb.movie(cb.to_3D(b.dot(f).T,(-1,shape[0],shape[1])),fr=1)
f=np.array(f).squeeze()
# bckg=bckg.resize(1,1,1.*num_chunks/b_size)
b_size=f.shape[0]
# if num_chunks != b_size:
# raise Exception('The number of frames are not matching')
#
counter=0
f_in=np.atleast_2d(scipy.signal.resample(f,num_chunks))
# C,f,S,bl,c1,neurons_sn,g,YrA = cse.temporal.update_temporal_components(Yr,A,b,C_in,f_in,p=0)
traces=[]
traces_BL=[]
traces_DFF=[]
for jj,mv in enumerate(mov_names):
mov_chunk_name=os.path.splitext(os.path.split(mv)[-1])[0]+'.hdf5'
mov_chunk_name=os.path.join(os.path.dirname(mv),mov_chunk_name)
print mov_chunk_name
m=cb.load(mov_chunk_name).to_2D().T
bckg_1=b.dot(f_in[:,counter:counter+chunk_sizes[jj]])
m=m-bckg_1
# (m).play(backend='opencv',gain=10.,fr=33)
# m=np.reshape(m,(-1,np.prod(shape)),order='F').T
# bckg_1=np.reshape(bckg_1,(-1,np.prod(shape)),order='F').T
counter+=chunk_sizes[jj]
Y_r_sig=A.T.dot(m)
Y_r_sig= scipy.sparse.linalg.spsolve(scipy.sparse.spdiags(np.sqrt(nA),0,nA.size,nA.size),Y_r_sig)
traces.append(Y_r_sig)
Y_r_bl=A.T.dot(bckg_1)
Y_r_bl= scipy.sparse.linalg.spsolve(scipy.sparse.spdiags(np.sqrt(nA),0,nA.size,nA.size),Y_r_bl)
traces_BL.append(Y_r_bl)
Y_r_bl=cse.utilities.mode_robust(Y_r_bl,1)
traces_DFF.append(Y_r_sig/Y_r_bl[:,np.newaxis])
return traces,traces_DFF,traces_BL
#%%
def extract_traces_mat(traces,triggers_idx,f_rate,time_before=2.7,time_after=5.3):
"""
Equivalent of numpy's take, for the input format we are using.
Parameters:
-----------
traces: list of ndarrays
each element is one trial, the dimensions are n_neurons x time
triggers_idx: list of ints
one for each element of traces, is the index of the trigger to align the traces to
f_rate: double
frame rate associated to the traces
time_before,time_after: double
time before and after the trigger establishing the boundary of the extracted subtraces
Returns:
--------
traces_mat: matrix containing traces with dimensions trials X cell X time
time_mat: associated time vector
"""
samples_before = np.int(time_before*f_rate)
samples_after = np.int(time_after*f_rate)
if traces[0].ndim > 1:
traces_mat = np.zeros([len(traces),len(traces[0]),samples_after+samples_before])
else:
traces_mat = np.zeros([len(traces),1,samples_after+samples_before])
for idx,tr in enumerate(traces):
# print samples_before,samples_after
# print np.int(triggers_idx[idx]-samples_before),np.int(triggers_idx[idx]+samples_after)
traces_mat[idx]=traces[idx][:,np.int(triggers_idx[idx]-samples_before):np.int(triggers_idx[idx]+samples_after)]
time_mat=np.arange(-samples_before,samples_after)/f_rate
return traces_mat,time_mat
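# Example (illustrative sketch, synthetic data): aligning traces to triggers.
#
#   f_rate = 30.
#   traces = [np.random.randn(10, 300) for _ in range(5)]   # 5 trials, 10 cells
#   triggers_idx = [150] * 5                                 # trigger at frame 150
#   traces_mat, time_mat = extract_traces_mat(traces, triggers_idx, f_rate,
#                                             time_before=2., time_after=3.)
#   # traces_mat.shape == (5, 10, 150); time_mat spans roughly -2 s to +3 s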
#%%
def load_data_from_stored_results(base_folder, load_masks=False, thresh_CR = 0.1,threshold_responsiveness=0.1,
is_blob=True,time_CR_on=-.1,time_US_on=.05,thresh_MOV_iqr=1000,time_CS_on_MOV=-.25,time_US_on_MOV=0):
"""
Retrieve variables of interest from the partially stored results
"""
import calblitz as cb
import numpy as np
import scipy
import pylab as pl
import pickle
from glob import glob
# base_folder='/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160714143248/'
if is_blob:
with np.load(base_folder+'distance_masks.npz') as ld:
D_s=ld['D_s']
with np.load(base_folder+'neurons_matching.npz') as ld:
neurons=ld['neurons']
locals().update(ld)
with np.load(base_folder+'all_triggers.npz') as at:
triggers_img=at['triggers']
trigger_names_img=at['trigger_names']
if load_masks:
f_results= glob(base_folder+'*results_analysis.npz')
f_results.sort()
for rs in f_results:
print rs
print '*****'
A_s,C_s,YrA_s, Cn_s, b_s, f_s, shape = load_results(f_results)
if is_blob:
remove_unconnected_components=True
else:
remove_unconnected_components=False
neurons=[]
for xx in A_s:
neurons.append(np.arange(A_s[0].shape[-1]))
# B_s, lab_imgs, cm_s = threshold_components(A_s,shape, min_size=5,max_size=50,max_perc=.5,remove_unconnected_components=remove_unconnected_components)
tmpl_name=glob(base_folder+'*template_total.npz')[0]
with np.load(tmpl_name) as ld:
mov_names_each=ld['movie_names']
A_each=[]
b_each=[]
f_each=[]
for idx, mov_names in enumerate(mov_names_each):
idx=0
A_each.append(A_s[idx][:,neurons[idx]])
# C=C_s[idx][neurons[idx]]
# YrA=YrA_s[idx][neurons[idx]]
b_each.append(b_s[idx])
f_each.append(f_s[idx])
else:
A_each=[]
b_each=[]
f_each=[]
with np.load(base_folder+'behavioral_traces.npz') as ld:
res_bt = dict(**ld)
tm=res_bt['time']
f_rate_bh=1/np.median(np.diff(tm))
ISI=res_bt['trial_info'][0][3]-res_bt['trial_info'][0][2]
eye_traces=np.array(res_bt['eyelid'])
idx_CS_US=res_bt['idx_CS_US']
idx_US=res_bt['idx_US']
idx_CS=res_bt['idx_CS']
idx_ALL=np.sort(np.hstack([idx_CS_US,idx_US,idx_CS]))
eye_traces,amplitudes_at_US, trig_CRs=process_eyelid_traces(eye_traces,tm,idx_CS_US,idx_US,idx_CS,thresh_CR=thresh_CR,time_CR_on=time_CR_on,time_US_on=time_US_on)
idxCSUSCR = trig_CRs['idxCSUSCR']
idxCSUSNOCR = trig_CRs['idxCSUSNOCR']
idxCSCR = trig_CRs['idxCSCR']
idxCSNOCR = trig_CRs['idxCSNOCR']
idxNOCR = trig_CRs['idxNOCR']
idxCR = trig_CRs['idxCR']
idxUS = trig_CRs['idxUS']
idxCSCSUS=np.concatenate([idx_CS,idx_CS_US])
with open(base_folder+'traces.pk','r') as f:
trdict= pickle.load(f)
traces_DFF=trdict['traces_DFF']
triggers_img=np.array(triggers_img)
idx_expected_US = np.zeros_like(triggers_img[:,1])
idx_expected_US = triggers_img[:,1]
idx_expected_US[idx_CS]=np.nanmedian(triggers_img[:,1])
triggers_img = np.concatenate([triggers_img, idx_expected_US[:,np.newaxis].astype(np.int)],-1)
img_descr=cb.utils.get_image_description_SI(glob(base_folder+'2016*.tif')[0])[0]
f_rate=img_descr['scanimage.SI.hRoiManager.scanFrameRate']
print f_rate
#%%
time_before=3
time_after=3
wheel,time_w=res_bt['wheel'],res_bt['time']
eye=eye_traces
time_e=tm
wheel_mat=np.array([wh[np.logical_and(time_w>-time_before,time_w<time_after)] for wh in wheel])
eye_mat=np.array([e[np.logical_and(time_e>-time_before,time_e<time_after)] for e in eye])
time_w_mat=time_w[np.logical_and(time_w>-time_before,time_w<time_after)]
time_e_mat=time_e[np.logical_and(time_e>-time_before,time_e<time_after)]
traces_mat,time_mat=extract_traces_mat(traces_DFF,triggers_img[:,1],f_rate,time_before=time_before,time_after=time_after)
# traces_mat,time_mat=scipy.signal.resample(traces_mat, len(time_w_mat),t=time_mat ,axis=-1)
#%
wheel_traces, movement_at_CS, trigs_mov = process_wheel_traces(np.array(res_bt['wheel']),tm,thresh_MOV_iqr=thresh_MOV_iqr,time_CS_on=time_CS_on_MOV,time_US_on=time_US_on_MOV)
print 'fraction with movement:'
print len(trigs_mov['idxMOV'])*1./len(trigs_mov['idxNO_MOV'])
#%%
triggers_out=dict()
triggers_out['mn_idx_CS_US'] =np.intersect1d(idx_CS_US,trigs_mov['idxNO_MOV'])
triggers_out['nm_idx_US']= np.intersect1d(idx_US,trigs_mov['idxNO_MOV'])
triggers_out['nm_idx_CS']= np.intersect1d(idx_CS,trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSUSCR'] = np.intersect1d(idxCSUSCR,trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSUSNOCR'] = np.intersect1d(idxCSUSNOCR,trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSCR'] = np.intersect1d(idxCSCR,trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSNOCR'] = np.intersect1d(idxCSNOCR,trigs_mov['idxNO_MOV'])
triggers_out['nm_idxNOCR'] = np.intersect1d(idxNOCR,trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCR'] = np.intersect1d(idxCR,trigs_mov['idxNO_MOV'])
triggers_out['nm_idxUS'] = np.intersect1d(idxUS,trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSCSUS']=np.intersect1d(idxCSCSUS,trigs_mov['idxNO_MOV'])
#%%
newf_rate=1/np.median(np.diff(time_mat))
ftraces=traces_mat.copy()
samples_before=np.int(time_before*newf_rate)
ISI_frames=np.int(ISI*newf_rate)
ftraces=ftraces-np.median(ftraces[:,:,np.logical_and(time_mat>-1,time_mat<-ISI)],axis=(2))[:,:,np.newaxis]
amplitudes_responses=np.mean(ftraces[:,:,np.logical_and(time_mat>-.03,time_mat<.04)],-1)
cell_responsiveness=np.median(amplitudes_responses[triggers_out['nm_idxCSCSUS']],axis=0)
idx_responsive = np.where(cell_responsiveness>threshold_responsiveness)[0]
fraction_responsive=len(np.where(cell_responsiveness>threshold_responsiveness)[0])*1./np.shape(ftraces)[1]
print 'fraction responsive:'
print fraction_responsive
ftraces=ftraces[:,cell_responsiveness>threshold_responsiveness,:]
amplitudes_responses=np.mean(ftraces[:,:,samples_before+ISI_frames-1:samples_before+ISI_frames+1],-1)
traces=dict()
traces['fluo_traces']=ftraces
traces['eye_traces']=eye_mat
traces['wheel_traces']=wheel_mat
traces['time_fluo']=time_mat
traces['time_eye']=time_e_mat
traces['time_wheel']=time_w_mat
amplitudes=dict()
amplitudes['amplitudes_fluo']=amplitudes_responses
amplitudes['amplitudes_eyelid']=amplitudes_at_US
masks=dict()
masks['A_each']=[A[:,idx_responsive] for A in A_each]
masks['b_each']=b_each
masks['f_each']=f_each
return traces, masks, triggers_out, amplitudes, ISI
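# Example (illustrative sketch, hypothetical folder): loading the stored results
# of one session and inspecting the responsive-cell traces.
#
#   base_folder = '/path/to/session/20160714143248/'
#   traces, masks, triggers_out, amplitudes, ISI = load_data_from_stored_results(
#       base_folder, load_masks=False, thresh_CR=0.1, threshold_responsiveness=0.1)
#   print np.shape(traces['fluo_traces'])   # trials x responsive cells x time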
#%%
def fast_process_day(base_folder,min_radius=3,max_radius=4):
import pickle
import pylab as pl
try:
tmpl_name=glob(base_folder+'*template_total.npz')[0]
print tmpl_name
with np.load(tmpl_name) as ld:
mov_names_each=ld['movie_names']
f_results= glob(base_folder+'*results_analysis.npz')
f_results.sort()
A_s,C_s, YrA_s, Cn_s, b_s, f_s, shape = load_results(f_results)
# B_s, lab_imgs, cm_s = threshold_components(A_s,shape, min_size=10,max_size=50,max_perc=.5)
traces=[]
traces_BL=[]
traces_DFF=[]
for idx, mov_names in enumerate(mov_names_each):
A=A_s[idx]
# C=C_s[idx][neurons[idx]]
# YrA=YrA_s[idx][neurons[idx]]
b=b_s[idx]
f=f_s[idx]
chunk_sizes=[]
for mv in mov_names:
base_name=os.path.splitext(os.path.split(mv)[-1])[0]
with np.load(base_folder+base_name+'.npz') as ld:
TT=len(ld['shifts'])
chunk_sizes.append(TT)
masks_ws,pos_examples,neg_examples=cse.utilities.extract_binary_masks_blob(A, min_radius, \
shape, num_std_threshold=1, minCircularity= 0.5, minInertiaRatio = 0.2,minConvexity = .8)
#sizes=np.sum(masks_ws,(1,2))
#pos_examples=np.intersect1d(pos_examples,np.where(sizes<max_radius**2*np.pi)[0])
print len(pos_examples)
# pl.close()
# pl.imshow(np.mean(masks_ws[pos_examples],0))
pl.pause(.1)
#A=A.tocsc()[:,pos_examples]
traces,traces_DFF,traces_BL = generate_linked_traces(mov_names,chunk_sizes,A,b,f)
np.savez(f_results[idx][:-4]+'_masks.npz',masks_ws=masks_ws,pos_examples=pos_examples, neg_examples=neg_examples, A=A.todense(),b=b,f=f)
with open(f_results[idx][:-4]+'_traces.pk','w') as f:
pickle.dump(dict(traces=traces,traces_BL=traces_BL,traces_DFF=traces_DFF),f)
except:
print 'Failed'
return False
return True
#%%
def process_fast_process_day(base_folders,save_name='temp_save.npz'):
"""
Use this after having used fast_process_day
Parameters:
----------
base_folders: list of paths to base folders
Returns:
--------
triggers_chunk_fluo: triggers associated to fluorescence (one per chunk)
eyelid_chunk: eyelid (one per chunk)
wheel_chunk: wheel (one per chunk)
triggers_chunk_bh: triggers associated to behavior (one per chunk)
tm_behav: time of behavior (one per chunk)
names_chunks: names of the file associated to each chunk(one per chunk)
fluo_chunk: fluorescence traces (one per chunk)
pos_examples_chunks: indexes of examples that were classified as good by the blob detector (one per chunk)
A_chunks: associated masks (one per chunk)
"""
triggers_chunk_fluo = []
eyelid_chunk = []
wheel_chunk = []
triggers_chunk_bh = []
tm_behav=[]
names_chunks=[]
fluo_chunk=[]
pos_examples_chunks=[]
A_chunks=[]
for base_folder in base_folders:
try:
print (base_folder)
with np.load(os.path.join(base_folder,'all_triggers.npz')) as ld:
triggers=ld['triggers']
trigger_names=ld['trigger_names']
with np.load(glob(os.path.join(base_folder,'*-template_total.npz'))[0]) as ld:
movie_names=ld['movie_names']
template_each=ld['template_each']
idx_chunks=[]
for name_chunk in movie_names:
idx_chunks.append([np.int(re.search('_00[0-9][0-9][0-9]_0',nm).group(0)[2:6])-1 for nm in name_chunk])
with np.load(base_folder+'behavioral_traces.npz') as ld:
res_bt = dict(**ld)
tm=res_bt['time']
f_rate_bh=1/np.median(np.diff(tm))
ISI=np.median([rs[3]-rs[2] for rs in res_bt['trial_info'][res_bt['idx_CS_US']]])
trig_int=np.hstack([((res_bt['trial_info'][:,2:4]-res_bt['trial_info'][:,0][:,None])*f_rate_bh),res_bt['trial_info'][:,-1][:,np.newaxis]]).astype(np.int)
trig_int[trig_int<0]=-1
trig_int=np.hstack([trig_int,len(tm)+trig_int[:,:1]*0])
trig_US=np.argmin(np.abs(tm))
trig_CS=np.argmin(np.abs(tm+ISI))
trig_int[res_bt['idx_CS_US'],0]=trig_CS
trig_int[res_bt['idx_CS_US'],1]=trig_US
trig_int[res_bt['idx_US'],1]=trig_US
trig_int[res_bt['idx_CS'],0]=trig_CS
eye_traces=np.array(res_bt['eyelid'])
wheel_traces=np.array(res_bt['wheel'])
fls=glob(os.path.join(base_folder,'*.results_analysis_traces.pk'))
fls.sort()
fls_m=glob(os.path.join(base_folder,'*.results_analysis_masks.npz'))
fls_m.sort()
for indxs,name_chunk,fl,fl_m in zip(idx_chunks,movie_names,fls,fls_m):
if np.all([nmc[:-4] for nmc in name_chunk] == trigger_names[indxs]):
triggers_chunk_fluo.append(triggers[indxs,:])
eyelid_chunk.append(eye_traces[indxs,:])
wheel_chunk.append(wheel_traces[indxs,:])
triggers_chunk_bh.append(trig_int[indxs,:])
tm_behav.append(tm)
names_chunks.append(fl)
with open(fl,'r') as f:
tr_dict=pickle.load(f)
print(fl)
fluo_chunk.append(tr_dict['traces_DFF'])
with np.load(fl_m) as ld:
A_chunks.append(scipy.sparse.coo_matrix(ld['A']))
pos_examples_chunks.append(ld['pos_examples'])
else:
raise Exception('Names of triggers not matching!')
except :
print("ERROR in:"+base_folder)
# raise
import pdb
pdb.set_trace()
if save_name is not None:
np.savez(save_name,triggers_chunk_fluo=triggers_chunk_fluo, triggers_chunk_bh=triggers_chunk_bh, eyelid_chunk=eyelid_chunk, wheel_chunk=wheel_chunk, tm_behav=tm_behav, fluo_chunk=fluo_chunk,names_chunks=names_chunks,pos_examples_chunks=pos_examples_chunks,A_chunks=A_chunks)
return triggers_chunk_fluo, eyelid_chunk,wheel_chunk ,triggers_chunk_bh ,tm_behav,names_chunks,fluo_chunk,pos_examples_chunks,A_chunks
| gpl-3.0 |
deroneriksson/systemml | projects/breast_cancer/breastcancer/preprocessing.py | 15 | 26035 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
"""
Preprocessing -- Predicting Breast Cancer Proliferation Scores with
Apache SystemML
This module contains functions for the preprocessing phase of the
breast cancer project.
"""
import math
import os
import numpy as np
import openslide
from openslide import OpenSlideError
from openslide.deepzoom import DeepZoomGenerator
import pandas as pd
from pyspark.ml.linalg import Vectors
import pyspark.sql.functions as F
from scipy.ndimage.morphology import binary_fill_holes
from skimage.color import rgb2gray
from skimage.feature import canny
from skimage.morphology import binary_closing, binary_dilation, disk
# Open Whole-Slide Image
def open_slide(slide_num, folder, training):
"""
Open a whole-slide image, given an image number.
Args:
slide_num: Slide image number as an integer.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
Returns:
An OpenSlide object representing a whole-slide image.
"""
if training:
filename = os.path.join(folder, "training_image_data",
"TUPAC-TR-{}.svs".format(str(slide_num).zfill(3)))
else:
# Testing images
filename = os.path.join(folder, "testing_image_data",
"TUPAC-TE-{}.svs".format(str(slide_num).zfill(3)))
try:
slide = openslide.open_slide(filename)
except OpenSlideError:
slide = None
except FileNotFoundError:
slide = None
return slide
# Create Tile Generator
def create_tile_generator(slide, tile_size, overlap):
"""
Create a tile generator for the given slide.
This generator is able to extract tiles from the overall
whole-slide image.
Args:
slide: An OpenSlide object representing a whole-slide image.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
Returns:
A DeepZoomGenerator object representing the tile generator. Each
extracted tile is a PIL Image with shape
(tile_size, tile_size, channels).
Note: This generator is not a true "Python generator function", but
rather is an object that is capable of extracting individual tiles.
"""
generator = DeepZoomGenerator(slide, tile_size=tile_size, overlap=overlap, limit_bounds=True)
return generator
# Determine 20x Magnification Zoom Level
def get_20x_zoom_level(slide, generator):
"""
Return the zoom level that corresponds to a 20x magnification.
The generator can extract tiles from multiple zoom levels,
downsampling by a factor of 2 per level from highest to lowest
resolution.
Args:
slide: An OpenSlide object representing a whole-slide image.
generator: A DeepZoomGenerator object representing a tile generator.
Note: This generator is not a true "Python generator function",
but rather is an object that is capable of extracting individual
tiles.
Returns:
Zoom level corresponding to a 20x magnification, or as close as
possible.
"""
highest_zoom_level = generator.level_count - 1 # 0-based indexing
try:
mag = int(slide.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER])
# `mag / 20` gives the downsampling factor between the slide's
# magnification and the desired 20x magnification.
# `(mag / 20) / 2` gives the zoom level offset from the highest
# resolution level, based on a 2x downsampling factor in the
# generator.
offset = math.floor((mag / 20) / 2)
level = highest_zoom_level - offset
except ValueError:
# In case the slide magnification level is unknown, just
# use the highest resolution.
level = highest_zoom_level
return level
# Generate Tile Indices For Whole-Slide Image.
def process_slide(slide_num, folder, training, tile_size, overlap):
"""
Generate all possible tile indices for a whole-slide image.
Given a slide number, tile size, and overlap, generate
all possible (slide_num, tile_size, overlap, zoom_level, col, row)
indices.
Args:
slide_num: Slide image number as an integer.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
Returns:
A list of (slide_num, tile_size, overlap, zoom_level, col, row)
integer index tuples representing possible tiles to extract.
"""
# Open slide.
slide = open_slide(slide_num, folder, training)
# Create tile generator.
generator = create_tile_generator(slide, tile_size, overlap)
# Get 20x zoom level.
zoom_level = get_20x_zoom_level(slide, generator)
# Generate all possible (zoom_level, col, row) tile index tuples.
cols, rows = generator.level_tiles[zoom_level]
tile_indices = [(slide_num, tile_size, overlap, zoom_level, col, row)
for col in range(cols) for row in range(rows)]
return tile_indices
# Generate Tile From Tile Index
def process_tile_index(tile_index, folder, training):
"""
Generate a tile from a tile index.
Given a (slide_num, tile_size, overlap, zoom_level, col, row) tile
index, generate a (slide_num, tile) tuple.
Args:
tile_index: A (slide_num, tile_size, overlap, zoom_level, col, row)
integer index tuple representing a tile to extract.
folder: Directory in which the slides folder is stored, as a string.
This should contain either a `training_image_data` folder with
images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`
folder with images in the format `TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
Returns:
A (slide_num, tile) tuple, where slide_num is an integer, and tile
is a 3D NumPy array of shape (tile_size, tile_size, channels) in
RGB format.
"""
slide_num, tile_size, overlap, zoom_level, col, row = tile_index
# Open slide.
slide = open_slide(slide_num, folder, training)
# Create tile generator.
generator = create_tile_generator(slide, tile_size, overlap)
# Generate tile.
tile = np.asarray(generator.get_tile(zoom_level, (col, row)))
return (slide_num, tile)
# Filter Tile For Dimensions & Tissue Threshold
def optical_density(tile):
"""
Convert a tile to optical density values.
Args:
tile: A 3D NumPy array of shape (tile_size, tile_size, channels).
Returns:
A 3D NumPy array of shape (tile_size, tile_size, channels)
representing optical density values.
"""
tile = tile.astype(np.float64)
#od = -np.log10(tile/255 + 1e-8)
od = -np.log((tile+1)/240)
return od
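# Example (illustrative sketch): optical density of a near-white (background)
# pixel is close to 0, while darker tissue pixels yield larger OD values.
#   optical_density(np.array([[[239., 239., 239.]]]))   # -> approx. [0, 0, 0]
#   optical_density(np.array([[[100.,  80., 120.]]]))   # -> approx. [0.87, 1.09, 0.68]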
def keep_tile(tile_tuple, tile_size, tissue_threshold):
"""
Determine if a tile should be kept.
This filters out tiles based on size and a tissue percentage
threshold, using a custom algorithm. If a tile has height &
width equal to (tile_size, tile_size), and its tissue coverage is greater
than or equal to the given threshold, then it will be kept;
otherwise it will be filtered out.
Args:
tile_tuple: A (slide_num, tile) tuple, where slide_num is an
integer, and tile is a 3D NumPy array of shape
(tile_size, tile_size, channels).
tile_size: The width and height of a square tile to be generated.
tissue_threshold: Tissue percentage threshold.
Returns:
A Boolean indicating whether or not a tile should be kept for
future usage.
"""
slide_num, tile = tile_tuple
if tile.shape[0:2] == (tile_size, tile_size):
tile_orig = tile
# Check 1
# Convert 3D RGB image to 2D grayscale image, from
# 0 (dense tissue) to 1 (plain background).
tile = rgb2gray(tile)
# 8-bit depth complement, from 1 (dense tissue)
# to 0 (plain background).
tile = 1 - tile
# Canny edge detection with hysteresis thresholding.
# This returns a binary map of edges, with 1 equal to
# an edge. The idea is that tissue would be full of
# edges, while background would not.
tile = canny(tile)
# Binary closing, which is a dilation followed by
# an erosion. This removes small dark spots, which
# helps remove noise in the background.
tile = binary_closing(tile, disk(10))
# Binary dilation, which enlarges bright areas,
# and shrinks dark areas. This helps fill in holes
# within regions of tissue.
tile = binary_dilation(tile, disk(10))
# Fill remaining holes within regions of tissue.
tile = binary_fill_holes(tile)
# Calculate percentage of tissue coverage.
percentage = tile.mean()
check1 = percentage >= tissue_threshold
# Check 2
# Convert to optical density values
tile = optical_density(tile_orig)
# Threshold at beta
beta = 0.15
tile = np.min(tile, axis=2) >= beta
# Apply morphology for same reasons as above.
tile = binary_closing(tile, disk(2))
tile = binary_dilation(tile, disk(2))
tile = binary_fill_holes(tile)
percentage = tile.mean()
check2 = percentage >= tissue_threshold
return check1 and check2
else:
return False
# Generate Samples From Tile
def process_tile(tile_tuple, sample_size, grayscale):
"""
Process a tile into a group of smaller samples.
Cut up a tile into smaller blocks of sample_size x sample_size pixels,
change the shape of each sample from (H, W, channels) to
(channels, H, W), then flatten each into a vector of length
channels*H*W.
Args:
tile_tuple: A (slide_num, tile) tuple, where slide_num is an
integer, and tile is a 3D NumPy array of shape
(tile_size, tile_size, channels).
sample_size: The new width and height of the square samples to be
generated.
grayscale: Whether or not to generate grayscale samples, rather
than RGB.
Returns:
A list of (slide_num, sample) tuples representing cut up tiles,
where each sample is a 3D NumPy array of shape
(sample_size_x, sample_size_y, channels).
"""
slide_num, tile = tile_tuple
if grayscale:
tile = rgb2gray(tile)[:, :, np.newaxis] # Grayscale
# Save disk space and future IO time by converting from [0,1] to [0,255],
# at the expense of some minor loss of information.
tile = np.round(tile * 255).astype("uint8")
x, y, ch = tile.shape
# 1. Reshape into a 5D array of (num_x, sample_size_x, num_y, sample_size_y, ch), where
# num_x and num_y are the number of chopped tiles on the x and y axes, respectively.
# 2. Swap sample_size_x and num_y axes to create
# (num_x, num_y, sample_size_x, sample_size_y, ch).
# 3. Combine num_x and num_y into single axis, returning
# (num_samples, sample_size_x, sample_size_y, ch).
samples = (tile.reshape((x // sample_size, sample_size, y // sample_size, sample_size, ch))
.swapaxes(1,2)
.reshape((-1, sample_size, sample_size, ch)))
samples = [(slide_num, sample) for sample in list(samples)]
return samples
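# Example (illustrative sketch): the reshape/swapaxes trick above, applied to a
# tiny 4x4 single-channel "tile" cut into 2x2 samples.
#   t = np.arange(16).reshape(4, 4, 1)
#   s = t.reshape((2, 2, 2, 2, 1)).swapaxes(1, 2).reshape((-1, 2, 2, 1))
#   # s.shape == (4, 2, 2, 1); s[0, ..., 0] == [[0, 1], [4, 5]]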
# Normalize staining
def normalize_staining(sample_tuple, beta=0.15, alpha=1, light_intensity=255):
"""
Normalize the staining of H&E histology slides.
This function normalizes the staining of H&E histology slides.
References:
- Macenko, Marc, et al. "A method for normalizing histology slides
for quantitative analysis." Biomedical Imaging: From Nano to Macro,
2009. ISBI'09. IEEE International Symposium on. IEEE, 2009.
- http://wwwx.cs.unc.edu/~mn/sites/default/files/macenko2009.pdf
- https://github.com/mitkovetta/staining-normalization
Args:
sample_tuple: A (slide_num, sample) tuple, where slide_num is an
integer, and sample is a 3D NumPy array of shape (H,W,C).
Returns:
A (slide_num, sample) tuple, where the sample is a 3D NumPy array
of shape (H,W,C) that has been stain normalized.
"""
# Setup.
slide_num, sample = sample_tuple
x = np.asarray(sample)
h, w, c = x.shape
x = x.reshape(-1, c).astype(np.float64) # shape (H*W, C)
# Reference stain vectors and stain saturations. We will normalize all slides
# to these references. To create these, grab the stain vectors and stain
# saturations from a desirable slide.
# Values in reference implementation for use with eigendecomposition approach, natural log,
# and `light_intensity=240`.
#stain_ref = np.array([0.5626, 0.2159, 0.7201, 0.8012, 0.4062, 0.5581]).reshape(3,2)
#max_sat_ref = np.array([1.9705, 1.0308]).reshape(2,1)
# SVD w/ log10, and `light_intensity=255`.
stain_ref = (np.array([0.54598845, 0.322116, 0.72385198, 0.76419107, 0.42182333, 0.55879629])
.reshape(3,2))
max_sat_ref = np.array([0.82791151, 0.61137274]).reshape(2,1)
# Convert RGB to OD.
# Note: The original paper used log10, and the reference implementation used the natural log.
#OD = -np.log((x+1)/light_intensity) # shape (H*W, C)
OD = -np.log10(x/light_intensity + 1e-8)
# Remove data with OD intensity less than beta.
# I.e. remove transparent pixels.
# Note: This needs to be checked per channel, rather than
# taking an average over all channels for a given pixel.
OD_thresh = OD[np.all(OD >= beta, 1), :] # shape (K, C)
# Calculate eigenvectors.
# Note: We can either use eigenvector decomposition, or SVD.
#eigvals, eigvecs = np.linalg.eig(np.cov(OD_thresh.T)) # np.cov results in inf/nans
U, s, V = np.linalg.svd(OD_thresh, full_matrices=False)
# Extract two largest eigenvectors.
# Note: We swap the sign of the eigvecs here to be consistent
# with other implementations. Both +/- eigvecs are valid, with
# the same eigenvalue, so this is okay.
#top_eigvecs = eigvecs[:, np.argsort(eigvals)[-2:]] * -1
top_eigvecs = V[0:2, :].T * -1 # shape (C, 2)
# Project thresholded optical density values onto plane spanned by
# 2 largest eigenvectors.
proj = np.dot(OD_thresh, top_eigvecs) # shape (K, 2)
# Calculate angle of each point wrt the first plane direction.
# Note: the parameters are `np.arctan2(y, x)`
angles = np.arctan2(proj[:, 1], proj[:, 0]) # shape (K,)
# Find robust extremes (a and 100-a percentiles) of the angle.
min_angle = np.percentile(angles, alpha)
max_angle = np.percentile(angles, 100-alpha)
# Convert min/max vectors (extremes) back to optimal stains in OD space.
# This computes a set of axes for each angle onto which we can project
# the top eigenvectors. This assumes that the projected values have
# been normalized to unit length.
extreme_angles = np.array(
[[np.cos(min_angle), np.cos(max_angle)],
[np.sin(min_angle), np.sin(max_angle)]]
) # shape (2,2)
stains = np.dot(top_eigvecs, extreme_angles) # shape (C, 2)
# Merge vectors with hematoxylin first, and eosin second, as a heuristic.
if stains[0, 0] < stains[0, 1]:
stains[:, [0, 1]] = stains[:, [1, 0]] # swap columns
# Calculate saturations of each stain.
# Note: Here, we solve
# OD = VS
# S = V^{-1}OD
# where `OD` is the matrix of optical density values of our image,
# `V` is the matrix of stain vectors, and `S` is the matrix of stain
# saturations. Since this is an overdetermined system, we use the
# least squares solver, rather than a direct solve.
sats, _, _, _ = np.linalg.lstsq(stains, OD.T)
# Normalize stain saturations to have same pseudo-maximum based on
# a reference max saturation.
max_sat = np.percentile(sats, 99, axis=1, keepdims=True)
sats = sats / max_sat * max_sat_ref
# Compute optimal OD values.
OD_norm = np.dot(stain_ref, sats)
# Recreate image.
# Note: If the image is immediately converted to uint8 with `.astype(np.uint8)`, it will
# not return the correct values due to the initial values being outside of [0,255].
# To fix this, we round to the nearest integer, and then clip to [0,255], which is the
# same behavior as Matlab.
#x_norm = np.exp(OD_norm) * light_intensity # natural log approach
x_norm = 10**(-OD_norm) * light_intensity - 1e-8 # log10 approach
x_norm = np.clip(np.round(x_norm), 0, 255).astype(np.uint8)
x_norm = x_norm.astype(np.uint8)
x_norm = x_norm.T.reshape(h,w,c)
return (slide_num, x_norm)
def flatten_sample(sample_tuple):
"""
Flatten a (H,W,C) sample into a (C*H*W) row vector.
Transpose each sample from (H, W, channels) to (channels, H, W), then
flatten each into a vector of length channels*H*W.
Args:
sample_tuple: A (slide_num, sample) tuple, where slide_num is an
integer, and sample is a 3D NumPy array of shape (H,W,C).
Returns:
A (slide_num, sample) tuple, where the sample has been transposed
from (H,W,C) to (C,H,W), and flattened to a vector of length
(C*H*W).
"""
slide_num, sample = sample_tuple
# 1. Swap axes from (sample_size_x, sample_size_y, ch) to
# (ch, sample_size_x, sample_size_y).
# 2. Flatten sample into (ch*sample_size_x*sample_size_y).
flattened_sample = sample.transpose(2,0,1).reshape(-1)
return (slide_num, flattened_sample)
# Get Ground Truth Labels
def get_labels_df(folder, filename="training_ground_truth.csv"):
"""
Create a DataFrame with the ground truth labels for each slide.
Args:
folder: Directory containing a `training_ground_truth.csv` file
containing the ground truth "tumor_score" and "molecular_score"
labels for each slide.
Returns:
A Pandas DataFrame containing the ground truth labels for each
slide.
"""
filepath = os.path.join(folder, filename)
labels_df = pd.read_csv(filepath, names=["tumor_score", "molecular_score"], header=None)
labels_df["slide_num"] = labels_df.index + 1 # slide numbering starts at 1
labels_df.set_index("slide_num", drop=False, inplace=True) # use the slide num as index
return labels_df
# Process All Slides Into A Spark DataFrame
def preprocess(spark, slide_nums, folder="data", training=True, tile_size=1024, overlap=0,
tissue_threshold=0.9, sample_size=256, grayscale=False, normalize_stains=True,
num_partitions=20000):
"""
Preprocess a set of whole-slide images.
Preprocess a set of whole-slide images as follows:
1. Tile the slides into tiles of size (tile_size, tile_size, 3).
2. Filter the tiles to remove unnecessary tissue.
3. Cut the remaining tiles into samples of size
(sample_size, sample_size, ch), where `ch` is 1 if `grayscale`
is true, or 3 otherwise.
Args:
spark: SparkSession.
slide_nums: List of whole-slide numbers to process.
folder: Local directory in which the slides folder and ground truth
file is stored, as a string. This should contain a
`training_image_data` folder with images in the format
`TUPAC-TR-###.svs`, as well as a `training_ground_truth.csv` file
containing the ground truth "tumor_score" and "molecular_score"
labels for each slide. Alternatively, the folder should contain a
`testing_image_data` folder with images in the format
`TUPAC-TE-###.svs`.
training: Boolean for training or testing datasets.
tile_size: The width and height of a square tile to be generated.
overlap: Number of pixels by which to overlap the tiles.
tissue_threshold: Tissue percentage threshold for filtering.
sample_size: The new width and height of the square samples to be
generated.
grayscale: Whether or not to generate grayscale samples, rather
than RGB.
normalize_stains: Whether or not to apply stain normalization.
num_partitions: Number of partitions to use during processing.
Returns:
A Spark DataFrame in which each row contains the slide number, tumor
score, molecular score, and the sample stretched out into a Vector.
"""
# Filter out broken slides
# Note: "Broken" here is due to a "version of OpenJPEG with broken support for chroma-subsampled
# images".
slides = (spark.sparkContext
.parallelize(slide_nums)
.filter(lambda slide: open_slide(slide, folder, training) is not None))
# Create DataFrame of all tile locations and increase number of partitions
# to avoid OOM during subsequent processing.
tile_indices = (slides.flatMap(
lambda slide: process_slide(slide, folder, training, tile_size, overlap)))
# TODO: Explore computing the ideal partition sizes based on projected number
# of tiles after filtering. I.e. something like the following:
#rows = tile_indices.count()
#part_size = 128
#channels = 1 if grayscale else 3
#row_mb = tile_size * tile_size * channels * 8 / 1024 / 1024 # size of one row in MB
#rows_per_part = round(part_size / row_mb)
#num_parts = rows / rows_per_part
tile_indices = tile_indices.repartition(num_partitions)
tile_indices.cache()
# Extract all tiles into an RDD, filter, cut into smaller samples, apply stain
# normalization, and flatten.
tiles = tile_indices.map(lambda tile_index: process_tile_index(tile_index, folder, training))
filtered_tiles = tiles.filter(lambda tile: keep_tile(tile, tile_size, tissue_threshold))
samples = filtered_tiles.flatMap(lambda tile: process_tile(tile, sample_size, grayscale))
if normalize_stains:
samples = samples.map(lambda sample: normalize_staining(sample))
samples = samples.map(lambda sample: flatten_sample(sample))
# Convert to a DataFrame
if training:
# Append labels
labels_df = get_labels_df(folder)
samples_with_labels = (samples.map(
lambda tup: (int(tup[0]), int(labels_df.at[tup[0],"tumor_score"]),
float(labels_df.at[tup[0],"molecular_score"]), Vectors.dense(tup[1]))))
df = samples_with_labels.toDF(["slide_num", "tumor_score", "molecular_score", "sample"])
df = df.select(df.slide_num.astype("int"), df.tumor_score.astype("int"),
df.molecular_score, df["sample"])
else: # testing data -- no labels
df = samples.toDF(["slide_num", "sample"])
df = df.select(df.slide_num.astype("int"), df["sample"])
return df
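# Example (illustrative sketch, hypothetical paths): running the full preprocessing
# pipeline in a Spark session; parameter values mirror the defaults above.
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.appName("breastcancer-preprocessing").getOrCreate()
#   slide_nums = list(range(1, 501))
#   df = preprocess(spark, slide_nums, folder="data", training=True,
#                   tile_size=1024, sample_size=256, grayscale=False)
#   save(df, "data/samples.parquet", sample_size=256, grayscale=False, mode="overwrite")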
# Save DataFrame
def save(df, filepath, sample_size, grayscale, mode="error", format="parquet", file_size=128):
"""
Save a preprocessed DataFrame with a constraint on the file sizes.
Args:
df: A Spark DataFrame.
filepath: Hadoop-supported path at which to save `df`.
sample_size: The width and height of the square samples.
grayscale: Whether or not the samples are in grayscale format,
rather than RGB.
mode: Specifies the behavior of `df.write.mode` when the data
already exists. Options include:
* `append`: Append contents of this DataFrame to
existing data.
* `overwrite`: Overwrite existing data.
* `error`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already
exists.
format: The format in which to save the DataFrame.
file_size: Size in MB of each saved file. 128 MB is an
empirically ideal size.
"""
channels = 1 if grayscale else 3
row_mb = sample_size * sample_size * channels * 8 / 1024 / 1024 # size of one row in MB
rows_per_file = round(file_size / row_mb)
df.write.option("maxRecordsPerFile", rows_per_file).mode(mode).save(filepath, format=format)
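# Example (illustrative arithmetic): for 256x256 RGB samples stored as doubles,
# one row is 256*256*3*8 / 1024 / 1024 = 1.5 MB, so a 128 MB target file holds
# round(128 / 1.5) = 85 rows, i.e. maxRecordsPerFile is set to 85.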
# Utilities
def add_row_indices(df, training=True):
"""
Add a row index column for faster data ingestion times with SystemML.
Args:
df: A Spark DataFrame in which each row contains the slide number,
tumor score, molecular score, and the sample stretched out into a
Vector.
training: Boolean for training or testing datasets.
Returns:
The Spark DataFrame with a row index column called "__INDEX".
"""
rdd = (df.rdd
.zipWithIndex()
.map(lambda r: (r[1] + 1, *r[0]))) # flatten & convert index to 1-based indexing
if training:
df = rdd.toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample'])
df = df.select(df["__INDEX"].astype("int"), df.slide_num.astype("int"),
df.tumor_score.astype("int"), df.molecular_score, df["sample"])
else: # testing data -- no labels
df = rdd.toDF(["__INDEX", "slide_num", "sample"])
df = df.select(df["__INDEX"].astype("int"), df.slide_num.astype("int"), df["sample"])
return df
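# Illustrative mapping (sketch): zipWithIndex pairs each row with a 0-based
# position, so the training row at position 0, e.g.
# (slide_num, tumor_score, molecular_score, sample), becomes
# (1, slide_num, tumor_score, molecular_score, sample), the row at position 1
# becomes (2, ...), and so on.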
def sample(df, frac, training=True, seed=None):
"""
Sample the DataFrame, stratified on the class.
Args:
df: A Spark DataFrame in which each row contains the slide number,
tumor score, molecular score, and the sample stretched out into a
Vector.
frac: Fraction of rows to keep.
training: Boolean for training or testing datasets.
seed: Random seed used for the sampling.
Returns:
A stratified sample of the original Spark DataFrame.
"""
df_sample = df.sampleBy("tumor_score", fractions={1: frac, 2: frac, 3: frac}, seed=seed)
return df_sample
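# Example usage (illustrative): keep roughly 1% of the rows of each
# tumor_score class, with a fixed seed for reproducibility:
#
#   df_small = sample(df, frac=0.01, training=True, seed=42)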
| apache-2.0 |
mdjurfeldt/nest-simulator | topology/doc/user_manual_scripts/layers.py | 8 | 10527 | # -*- coding: utf-8 -*-
#
# layers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Run as python layers.py > layers.log
import matplotlib.pyplot as plt
import nest
import numpy as np
# seed NumPy RNG to ensure identical results for runs with random placement
np.random.seed(1234567)
def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
top = nest.GetStatus(l)[0]['topology']
ctr = top['center']
ext = top['extent']
if xticks is None:
if 'rows' in top:
dx = float(ext[0]) / top['columns']
dy = float(ext[1]) / top['rows']
xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
top['columns'])
yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
top['rows'])
if xlim is None:
xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
0] / 2. + dx / 2.] # extra space so extent is visible
ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
else:
ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
# --------------------------------------------------
nest.ResetKernel()
#{ layer1 #}
import nest.topology as tp
l = tp.CreateLayer({'rows': 5,
'columns': 5,
'elements': 'iaf_psc_alpha'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
tx.append(ax.text(0.65, 0.4 - r * 0.2, str(r),
horizontalalignment='center',
verticalalignment='center'))
tx.append(ax.text(-0.4 + r * 0.2, 0.65, str(r),
horizontalalignment='center',
verticalalignment='center'))
# For bbox_extra_artists, see
# https://github.com/matplotlib/matplotlib/issues/351
plt.savefig('../user_manual_figures/layer1.png', bbox_inches='tight',
bbox_extra_artists=tx)
print("#{ layer1s.log #}")
#{ layer1s #}
print(nest.GetStatus(l)[0]['topology'])
#{ end #}
print("#{ end.log #}")
print("#{ layer1p.log #}")
#{ layer1p #}
nest.PrintNetwork(depth=3)
#{ end #}
print("#{ end.log #}")
# --------------------------------------------------
nest.ResetKernel()
#{ layer2 #}
l = tp.CreateLayer({'rows': 5,
'columns': 5,
'extent': [2.0, 0.5],
'elements': 'iaf_psc_alpha'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
tx.append(fig.gca().text(1.25, 0.2 - r * 0.1, str(r),
horizontalalignment='center',
verticalalignment='center'))
tx.append(fig.gca().text(-0.8 + r * 0.4, 0.35, str(r),
horizontalalignment='center',
verticalalignment='center'))
# See https://github.com/matplotlib/matplotlib/issues/351
plt.savefig('../user_manual_figures/layer2.png', bbox_inches='tight',
bbox_extra_artists=tx)
# --------------------------------------------------
nest.ResetKernel()
#{ layer3 #}
l1 = tp.CreateLayer({'rows': 5, 'columns': 5, 'elements': 'iaf_psc_alpha'})
l2 = tp.CreateLayer({'rows': 5, 'columns': 5, 'elements': 'iaf_psc_alpha',
'center': [-1., 1.]})
l3 = tp.CreateLayer({'rows': 5, 'columns': 5, 'elements': 'iaf_psc_alpha',
'center': [1.5, 0.5]})
#{ end #}
fig = tp.PlotLayer(l1, nodesize=50)
tp.PlotLayer(l2, nodesize=50, nodecolor='g', fig=fig)
tp.PlotLayer(l3, nodesize=50, nodecolor='r', fig=fig)
beautify_layer(l1, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-1.6, 2.1], ylim=[-0.6, 1.6],
xticks=np.arange(-1.4, 2.05, 0.2),
yticks=np.arange(-0.4, 1.45, 0.2))
plt.savefig('../user_manual_figures/layer3.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer3a #}
nc, nr = 5, 3
d = 0.1
l = tp.CreateLayer({'columns': nc, 'rows': nr, 'elements': 'iaf_psc_alpha',
'extent': [nc * d, nr * d], 'center': [nc * d / 2., 0.]})
#{ end #}
fig = tp.PlotLayer(l, nodesize=100)
plt.plot(0, 0, 'x', markersize=20, c='k', mew=3)
plt.plot(nc * d / 2, 0, 'o', markersize=20, c='k', mew=3, mfc='none',
zorder=100)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xticks=np.arange(0., 0.501, 0.05),
yticks=np.arange(-0.15, 0.151, 0.05),
xlim=[-0.05, 0.55], ylim=[-0.2, 0.2])
plt.savefig('../user_manual_figures/layer3a.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4 #}
import numpy as np
pos = [[np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5)]
for j in range(50)]
l = tp.CreateLayer({'positions': pos,
'elements': 'iaf_psc_alpha'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-0.55, 0.55], ylim=[-0.55, 0.55],
xticks=[-0.5, 0., 0.5], yticks=[-0.5, 0., 0.5])
plt.savefig('../user_manual_figures/layer4.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4_3d #}
import numpy as np
pos = [[np.random.uniform(-0.5, 0.5), np.random.uniform(-0.5, 0.5),
np.random.uniform(-0.5, 0.5)] for j in range(200)]
l = tp.CreateLayer({'positions': pos,
'elements': 'iaf_psc_alpha'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ player #}
lp = tp.CreateLayer({'rows': 1, 'columns': 5, 'extent': [5., 1.],
'elements': 'iaf_psc_alpha',
'edge_wrap': True})
#{ end #}
# fake plot with layer on line and circle
clist = [(0, 0, 1), (0.35, 0, 1), (0.6, 0, 1), (0.8, 0, 1), (1.0, 0, 1)]
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1.scatter(range(1, 6), [0] * 5, s=200, c=clist)
ax1.set_xlim([0, 6])
ax1.set_ylim([-0.5, 1.25])
ax1.set_aspect('equal', 'box')
ax1.set_xticks([])
ax1.set_yticks([])
for j in range(1, 6):
ax1.text(j, 0.5, str('(%d,0)' % (j - 3)),
horizontalalignment='center', verticalalignment='bottom')
ax1a = fig.add_subplot(223)
ax1a.plot([0.5, 5.5], [0, 0], 'k-', lw=2)
ax1a.scatter(range(1, 6), [0] * 5, s=200,
c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax1a.set_xlim([0, 6])
ax1a.set_ylim([-0.5, 1.25])
ax1a.set_aspect('equal', 'box')
ax1a.set_xticks([])
ax1a.set_yticks([])
for j in range(1, 6):
ax1a.text(j, 0.5, str('(%d,0)' % (j - 3)),
horizontalalignment='center', verticalalignment='bottom')
ax2 = fig.add_subplot(122)
phic = np.arange(0., 2 * np.pi + 0.5, 0.1)
r = 5. / (2 * np.pi)
ax2.plot(r * np.cos(phic), r * np.sin(phic), 'k-', lw=2)
phin = np.arange(0., 4.1, 1.) * 2 * np.pi / 5
ax2.scatter(r * np.sin(phin), r * np.cos(phin), s=200,
c=[clist[0], clist[1], clist[2], clist[2], clist[1]])
ax2.set_xlim([-1.3, 1.3])
ax2.set_ylim([-1.2, 1.2])
ax2.set_aspect('equal', 'box')
ax2.set_xticks([])
ax2.set_yticks([])
for j in range(5):
ax2.text(1.4 * r * np.sin(phin[j]), 1.4 * r * np.cos(phin[j]),
str('(%d,0)' % (j + 1 - 3)),
horizontalalignment='center', verticalalignment='center')
plt.savefig('../user_manual_figures/player.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer6 #}
l = tp.CreateLayer({'rows': 1, 'columns': 2,
'elements': ['iaf_cond_alpha', 'poisson_generator']})
#{ end #}
print("#{ layer6 #}")
nest.PrintNetwork(depth=3)
print("#{ end #}")
# --------------------------------------------------
nest.ResetKernel()
#{ layer7 #}
l = tp.CreateLayer({'rows': 1, 'columns': 2,
'elements': ['iaf_cond_alpha', 10, 'poisson_generator',
'noise_generator', 2]})
#{ end #}
print("#{ layer7 #}")
nest.PrintNetwork(depth=3)
print("#{ end #}")
# --------------------------------------------------
nest.ResetKernel()
#{ layer10 #}
for lyr in ['L23', 'L4', 'L56']:
nest.CopyModel('iaf_psc_alpha', lyr + 'pyr')
nest.CopyModel('iaf_psc_alpha', lyr + 'in', {'V_th': -52.})
l = tp.CreateLayer({'rows': 20, 'columns': 20, 'extent': [0.5, 0.5],
'elements': ['L23pyr', 3, 'L23in',
'L4pyr', 3, 'L4in',
'L56pyr', 3, 'L56in']})
#{ end #}
# --------------------------------------------------
nest.ResetKernel()
#{ vislayer #}
l = tp.CreateLayer({'rows': 21, 'columns': 21,
'elements': 'iaf_psc_alpha'})
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 0.4}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 0.15}}}
tp.ConnectLayers(l, l, conndict)
fig = tp.PlotLayer(l, nodesize=80)
ctr = tp.FindCenterElement(l)
tp.PlotTargets(ctr, l, fig=fig,
mask=conndict['mask'], kernel=conndict['kernel'],
src_size=250, tgt_color='red', tgt_size=20,
kernel_color='green')
#{ end #}
plt.savefig('../user_manual_figures/vislayer.png', bbox_inches='tight')
| gpl-2.0 |
google-research/google-research | constrained_language_typology/compute_associations_main.py | 1 | 9567 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Computation of feature associations.
Computes:
1) Most likely value for a given feature given the clade (genetic
preference) for genera and families.
2) Most likely value2 for a given feature2 given feature1, value1
(implicational feature preference).
Usage, e.g.:
First run:
python3 sigtyp_reader_main.py \
--sigtyp_dir ~/ST2020-master/data \
--output_dir=/var/tmp/sigtyp
Then, using the defaults:
python3 compute_associations_main.py \
--training_data=/var/tmp/sigtyp/train.csv \
--dev_data=/var/tmp/sigtyp/dev.csv
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import os
from absl import app
from absl import flags
import compute_associations # pylint: disable=[unused-import]
import constants as const
import pandas as pd
import utils
# pylint: disable=g-long-lambda
flags.DEFINE_string(
"training_data", "",
"Training data in CSV file format with the `|` column separator. "
"This format is produced by `sigtyp_reader_main.py`.")
flags.DEFINE_string(
"dev_data", None,
"Development data in CSV file format with the `|` column separator. "
"This format is produced by `sigtyp_reader_main.py`.")
flags.DEFINE_float(
"close_enough", 2500,
"Distance in kilometers between two languages to count as 'close enough' "
"to be in the same neighborhood")
FLAGS = flags.FLAGS
def write_neighborhoods(path, neighborhoods):
"""Writes neighbourhood associations to a file."""
with open(path, "w") as stream:
# Write out for max value
#
# Lat,Lng for language
# Feature
# Value
# Probability of value given feature and neighborhood
# Total counts for feature+value+neighborhood
stream.write("{}|{}|{}|{}|{}\n".format(
"lat,lng", "f", "v", '"p(v|f, c)"', "n(f, c)"))
for latlng in neighborhoods:
for f in neighborhoods[latlng]:
tot = 0
max_c = 0
for v in neighborhoods[latlng][f]:
if neighborhoods[latlng][f][v] > max_c:
max_c = neighborhoods[latlng][f][v]
max_v = v
tot += neighborhoods[latlng][f][v]
stream.write("{},{}|{}|{}|{:0.3f}|{}\n".format(
latlng[0], latlng[1], f, max_v, max_c / tot, tot))
def write_implicational(path, implicational, implicational_prior):
"""Writes implicational associations to a file."""
with open(path, "w") as stream:
# Write out for max value
#
# Feature1
# Value1
# Feature2
# Value2
# Probability of Value2 given Feature1, Value1 and Feature2
# Total counts for Feature1, Value1 and Feature2
# Probability of Value2 given Feature2
# Total counts for Value2, Feature2
stream.write("{}|{}|{}|{}|{}|{}|{}|{}\n".format(
"f1", "v1", "f2", "v2", '"p(v2|f1, v1, f2)"', "n(f1, v1, f2)",
'"p(v2|f2)"', "n(f2, v2)"))
for (f1, v1) in implicational:
for f2 in implicational[f1, v1]:
tot = 0
max_c = 0
for v2 in implicational[f1, v1][f2]:
if implicational[f1, v1][f2][v2] > max_c:
max_c = implicational[f1, v1][f2][v2]
max_v2 = v2
tot += implicational[f1, v1][f2][v2]
tot_f2 = 0
for v2 in implicational_prior[f2]:
tot_f2 += implicational_prior[f2][v2]
prior_v2_prob = implicational_prior[f2][max_v2] / tot_f2
stream.write("{}|{}|{}|{}|{:0.3f}|{}|{:0.3f}|{}\n".format(
f1, v1, f2, max_v2, max_c / tot, tot, prior_v2_prob, tot_f2))
def write_clades(path, clades):
"""Writes glade information to a file."""
with open(path, "w") as stream:
# Write out for max value
#
# Clade
# Feature
# Value
# Probability of value given feature and clade
# Total counts for feature+clade
stream.write("{}|{}|{}|{}|{}\n".format(
"clade", "f", "v", '"p(v|f, c)"', "n(f, c)"))
for clade in clades:
for f in clades[clade]:
tot = 0
max_c = 0
for v in clades[clade][f]:
if clades[clade][f][v] > max_c:
max_c = clades[clade][f][v]
max_v = v
tot += clades[clade][f][v]
stream.write("{}|{}|{}|{:0.3f}|{}\n".format(
clade, f, max_v, max_c / tot, tot))
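# Example output row (illustrative values only): a clade in which the most
# frequent value of a feature accounts for 9 of its 12 observations would be
# written as:
#
#   SomeGenus|some_feature|some_value|0.750|12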
def find_close_languages(lat1, lng1, languages, distance_cache):
"""Given latitude/longitude coordinates finds the nearest language."""
close_language_indices = []
for i, language in enumerate(languages):
lat2 = language["latitude"]
lng2 = language["longitude"]
loc1 = (float(lat1), float(lng1))
loc2 = (float(lat2), float(lng2))
if (loc1, loc2) not in distance_cache:
dist = utils.haversine_distance((float(lat1), float(lng1)),
(float(lat2), float(lng2)))
distance_cache[(loc1, loc2)] = dist
distance_cache[(loc2, loc1)] = dist
else:
dist = distance_cache[(loc1, loc2)]
if dist < FLAGS.close_enough:
close_language_indices.append(i)
return close_language_indices
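# For reference, a minimal sketch of the great-circle (haversine) distance
# that utils.haversine_distance is assumed to compute; illustrative only, the
# actual helper may differ in argument handling, Earth radius, or units:
#
#   import math
#
#   def haversine_km(p1, p2, radius_km=6371.0):
#       """Distance in km between two (lat, lng) points given in degrees."""
#       lat1, lng1 = map(math.radians, p1)
#       lat2, lng2 = map(math.radians, p2)
#       a = (math.sin((lat2 - lat1) / 2) ** 2 +
#            math.cos(lat1) * math.cos(lat2) * math.sin((lng2 - lng1) / 2) ** 2)
#       return 2 * radius_km * math.asin(math.sqrt(a))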
def correlate_features_for_training():
"""Computes all the feature associations required for training a model."""
training = pd.read_csv(FLAGS.training_data, delimiter="|",
encoding=const.ENCODING)
features = training.columns[7:]
clades = collections.defaultdict(
lambda:
collections.defaultdict(
lambda:
collections.defaultdict(
lambda:
collections.defaultdict(int))))
implicational = collections.defaultdict(
lambda:
collections.defaultdict(
lambda:
collections.defaultdict(int)))
# Whereas the implicationals collect the conditional probability of v2, given
# f1, v1 and f2, this just collects the conditional probability of v2 given
# f2. If the latter is also high, then the fact that the former is high is
# probably of less interest.
implicational_prior = collections.defaultdict(
lambda:
collections.defaultdict(int))
neighborhoods = collections.defaultdict(
lambda:
collections.defaultdict(
lambda:
collections.defaultdict(int)))
feature_frequency = collections.defaultdict(int)
distance_cache = {}
training_list = training.to_dict(orient="row")
for language_df in training_list:
genus = language_df["genus"]
family = language_df["family"]
for f1 in features:
v1 = language_df[f1]
if pd.isnull(v1):
continue
clades["genus"][genus][f1][v1] += 1
clades["family"][family][f1][v1] += 1
feature_frequency[f1, v1] += 1
for f2 in features:
if f1 == f2:
continue
v2 = language_df[f2]
if pd.isnull(v2):
continue
implicational[f1, v1][f2][v2] += 1
for f2 in features:
v2 = language_df[f2]
if pd.isnull(v2):
continue
implicational_prior[f2][v2] += 1
# Find nearby languages
lat1 = language_df["latitude"]
lng1 = language_df["longitude"]
close_language_indices = find_close_languages(
lat1, lng1, training_list, distance_cache)
if len(close_language_indices) == 1:
continue
for f1 in features:
for k in close_language_indices:
v1 = training_list[k][f1]
if pd.isnull(v1):
continue
neighborhoods[lat1, lng1][f1][v1] += 1
if FLAGS.dev_data:
# If we are also processing the development data, make sure that we also
# provide neighborhoods for the lat,lng for each language in the development
# data --- of course only actually using data from training.
development = pd.read_csv(FLAGS.dev_data, delimiter="|",
encoding=const.ENCODING)
development_list = development.to_dict(orient="row")
for language_df in development_list:
lat1 = language_df["latitude"]
lng1 = language_df["longitude"]
close_language_indices = find_close_languages(
lat1, lng1, training_list, distance_cache)
if len(close_language_indices) == 1:
continue
for f1 in features:
for k in close_language_indices:
v1 = training_list[k][f1]
if pd.isnull(v1):
continue
neighborhoods[lat1, lng1][f1][v1] += 1
clade_types = [("genus", FLAGS.genus_filename),
("family", FLAGS.family_filename)]
for clade_type, clade_filename in clade_types:
write_clades(os.path.join(FLAGS.association_dir, clade_filename),
clades[clade_type])
write_neighborhoods(os.path.join(
FLAGS.association_dir, FLAGS.neighborhood_filename), neighborhoods)
write_implicational(os.path.join(
FLAGS.association_dir, FLAGS.implicational_filename),
implicational, implicational_prior)
def main(unused_argv):
correlate_features_for_training()
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
XiaoxiaoLiu/morphology_analysis | IVSCC/add_extra_ratio_features.py | 1 | 2313 | import pandas as pd
import platform
if (platform.system() == "Linux"):
WORK_PATH = "/local1/xiaoxiaol/work"
else:
WORK_PATH = "/Users/xiaoxiaoliu/work"
###############################################################################
#data_DIR = '/data/mat/xiaoxiaol/data/lims2/0903_filtered_ephys_qc'
data_DIR = WORK_PATH +'/data/lims2/0923_pw_aligned'
# /original stores the downloaded swc files
original_dir = data_DIR + "/pw_aligned"
preprocessed_dir = data_DIR + "/preprocessed"
###############################################################################
#gl_feature_names = np.array(
# ['total_length', 'soma_surface', 'num_stems', 'num_bifurcations', 'num_branches', 'num_of_tips',
# 'overall_width', 'overall_height', 'overall_depth', 'average_diameter', 'num_nodes',
# 'total_surface', 'total_volume', 'max_euclidean_distance', 'max_path_distance', 'max_branch_order',
# 'average_contraction', 'average fragmentation', 'parent_daughter_ratio', 'bifurcation_angle_local',
# 'bifurcation_angle_remote','height_width_ratio','average_branch_length','length_surface_ratio'])
#
fnames = [u'Unnamed: 0', u'specimen_id', u'specimen_name', u'dendrite_type',
u'cre_line', u'layer', u'swc_file', u'num_nodes', u'soma_surface',
u'num_stems', u'num_bifurcations', u'num_branches', u'num_of_tips',
u'overall_depth', u'overall_width', u'overall_height',
u'average_diameter', u'total_length', u'total_surface', u'total_volume',
u'max_euclidean_distance', u'max_path_distance', u'max_branch_order',
u'average_contraction', u'average fragmentation',
u'parent_daughter_ratio', u'bifurcation_angle_local',
u'bifurcation_angle_remote', u'moment1', u'moment2', u'moment3',
u'moment4', u'moment5', u'moment6', u'moment7', u'moment8', u'moment9',
u'moment10', u'moment11', u'moment12', u'moment13', u'avgR']
df_f = pd.read_csv(preprocessed_dir + '/features_with_db_tags.csv')
df_f.columns=fnames
df_f['height_width_ratio'] = df_f['overall_height']/df_f['overall_width']
df_f['average_branch_length']=df_f['total_length']/df_f['num_branches']
df_f ['length_surface_ratio'] = df_f ['total_length']/df_f['total_surface']
df_f.to_csv(preprocessed_dir + '/features_with_db_tags_added.csv')
| gpl-3.0 |
harshaneelhg/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
jmschrei/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
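# order[k] is the index of the MiniBatchKMeans center closest to KMeans center
# k, so the two label arrays can be compared cluster by cluster below.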
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/backends/backend_gtk3.py | 2 | 32330 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os, sys
try:
import gi
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except AttributeError:
raise ImportError(
"pygobject version too old -- it must have require_version")
except ValueError:
raise ImportError(
"Gtk3 backend requires the GObject introspection bindings for Gtk 3 "
"to be installed.")
try:
from gi.repository import Gtk, Gdk, GObject, GLib
except ImportError:
raise ImportError("Gtk3 backend requires pygobject to be installed.")
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
NavigationToolbar2, RendererBase, TimerBase, cursors)
from matplotlib.backend_bases import ToolContainerBase, StatusbarBase
from matplotlib.backend_managers import ToolManager
from matplotlib.cbook import is_writable_file_like
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import (
backend_tools, cbook, colors as mcolors, lines, verbose, rcParams)
backend_version = "%s.%s.%s" % (
Gtk.get_major_version(), Gtk.get_minor_version(), Gtk.get_micro_version())
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : Gdk.Cursor.new(Gdk.CursorType.FLEUR),
cursors.HAND : Gdk.Cursor.new(Gdk.CursorType.HAND2),
cursors.POINTER : Gdk.Cursor.new(Gdk.CursorType.LEFT_PTR),
cursors.SELECT_REGION : Gdk.Cursor.new(Gdk.CursorType.TCROSS),
cursors.WAIT : Gdk.Cursor.new(Gdk.CursorType.WATCH),
}
class TimerGTK3(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` using GTK3 for timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should operate as single
shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def _timer_start(self):
# Need to stop it, otherwise we potentially leak a timer id that will
# never be stopped.
self._timer_stop()
self._timer = GLib.timeout_add(self._interval, self._on_timer)
def _timer_stop(self):
if self._timer is not None:
GLib.source_remove(self._timer)
self._timer = None
def _timer_set_interval(self):
# Only stop and restart it if the timer has already been started
if self._timer is not None:
self._timer_stop()
self._timer_start()
def _on_timer(self):
TimerBase._on_timer(self)
# Gtk timeout_add() requires that the callback returns True if it
# is to be called again.
if len(self.callbacks) > 0 and not self._single:
return True
else:
self._timer = None
return False
class FigureCanvasGTK3(Gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.EXPOSURE_MASK |
Gdk.EventMask.KEY_PRESS_MASK |
Gdk.EventMask.KEY_RELEASE_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.POINTER_MOTION_MASK |
Gdk.EventMask.POINTER_MOTION_HINT_MASK|
Gdk.EventMask.SCROLL_MASK)
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
GObject.GObject.__init__(self)
self._idle_draw_id = 0
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('draw', self.on_draw_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.connect('size_allocate', self.size_allocate)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(True)
self.set_can_focus(True)
self._renderer_init()
default_context = GLib.main_context_get_thread_default() or GLib.main_context_default()
def destroy(self):
#Gtk.DrawingArea.destroy(self)
self.close_event()
if self._idle_draw_id != 0:
GLib.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
if event.direction==Gdk.ScrollDirection.UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=event)
return False # finish event propagation?
def button_press_event(self, widget, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def button_release_event(self, widget, event):
x = event.x
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button, guiEvent=event)
return False # finish event propagation?
def key_press_event(self, widget, event):
key = self._get_key(event)
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
return True # stop event propagation
def key_release_event(self, widget, event):
key = self._get_key(event)
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
return True # stop event propagation
def motion_notify_event(self, widget, event):
if event.is_hint:
t, x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.get_state()
# flipy so y=0 is bottom of canvas
y = self.get_allocation().height - y
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def size_allocate(self, widget, allocation):
dpival = self.figure.dpi
winch = allocation.width / dpival
hinch = allocation.height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
FigureCanvasBase.resize_event(self)
self.draw_idle()
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval < 256:
key = chr(event.keyval)
else:
key = None
modifiers = [
(Gdk.ModifierType.MOD4_MASK, 'super'),
(Gdk.ModifierType.MOD1_MASK, 'alt'),
(Gdk.ModifierType.CONTROL_MASK, 'ctrl'),
]
for key_mask, prefix in modifiers:
if event.state & key_mask:
key = '{0}+{1}'.format(prefix, key)
return key
def configure_event(self, widget, event):
if widget.get_property("window") is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches(w/dpi, h/dpi, forward=False)
return False # finish event propagation?
def on_draw_event(self, widget, ctx):
# to be overwritten by GTK3Agg or GTK3Cairo
pass
def draw(self):
if self.get_visible() and self.get_mapped():
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.get_property("window").process_updates (False)
def draw_idle(self):
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = GLib.idle_add(idle_draw)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
Other Parameters
----------------
interval : scalar
Timer interval in milliseconds
callbacks : list
Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
will be executed by the timer every *interval*.
"""
return TimerGTK3(*args, **kwargs)
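# Example usage (illustrative sketch, not part of the original source):
#
#   timer = canvas.new_timer(interval=500)
#   timer.add_callback(canvas.draw_idle)
#   timer.start()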
def flush_events(self):
Gdk.threads_enter()
while Gtk.events_pending():
Gtk.main_iteration()
Gdk.flush()
Gdk.threads_leave()
class FigureManagerGTK3(FigureManagerBase):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : Gtk.Toolbar
The Gtk.Toolbar (gtk only)
vbox : Gtk.VBox
The Gtk.VBox containing the canvas and toolbar (gtk only)
window : Gtk.Window
The Gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.window = Gtk.Window()
self.window.set_wmclass("matplotlib", "Matplotlib")
self.set_window_title("Figure %d" % num)
try:
self.window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# some versions of gtk throw a glib.GError but not
# all, so I am not sure how to catch it. I am unhappy
# doing a blanket catch here, but am not sure what a
# better way is - JDH
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
self.vbox.pack_start(self.canvas, True, True, 0)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
self.toolmanager = self._get_toolmanager()
self.toolbar = self._get_toolbar()
self.statusbar = None
def add_widget(child, expand, fill, padding):
child.show()
self.vbox.pack_end(child, False, False, 0)
size_request = child.size_request()
return size_request.height
if self.toolmanager:
backend_tools.add_tools_to_manager(self.toolmanager)
if self.toolbar:
backend_tools.add_tools_to_container(self.toolbar)
self.statusbar = StatusbarGTK3(self.toolmanager)
h += add_widget(self.statusbar, False, False, 0)
h += add_widget(Gtk.HSeparator(), False, False, 0)
if self.toolbar is not None:
self.toolbar.show()
h += add_widget(self.toolbar, False, False, 0)
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolmanager is not None:
pass
elif self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
if self.toolbar:
self.toolbar.destroy()
if (Gcf.get_num_fig_managers() == 0 and
not matplotlib.is_interactive() and
Gtk.main_level() >= 1):
Gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
self.window.present()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self):
# must be inited after the window, drawingArea and figure
# attrs are set
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK3(self.canvas, self.window)
elif rcParams['toolbar'] == 'toolmanager':
toolbar = ToolbarGTK3(self.toolmanager)
else:
toolbar = None
return toolbar
def _get_toolmanager(self):
# must be initialised after the toolbar has been set
if rcParams['toolbar'] == 'toolmanager':
toolmanager = ToolManager(self.canvas.figure)
else:
toolmanager = None
return toolmanager
def get_window_title(self):
return self.window.get_title()
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK3(NavigationToolbar2, Gtk.Toolbar):
def __init__(self, canvas, window):
self.win = window
GObject.GObject.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self.ctx = None
def set_message(self, s):
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.get_property("window").set_cursor(cursord[cursor])
Gtk.main_iteration()
def release(self, event):
try: del self._pixmapBack
except AttributeError: pass
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
self.ctx = self.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.canvas.draw()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0,x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
def _init_toolbar(self):
self.set_style(Gtk.ToolbarStyle.ICONS)
basedir = os.path.join(rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( Gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file + '.png')
image = Gtk.Image()
image.set_from_file(fname)
tbutton = Gtk.ToolButton()
tbutton.set_label(text)
tbutton.set_icon_widget(image)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip_text(tooltip_text)
toolitem = Gtk.SeparatorToolItem()
self.insert(toolitem, -1)
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = Gtk.ToolItem()
self.insert(toolitem, -1)
self.message = Gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.win,
path=os.path.expanduser(rcParams['savefig.directory']),
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
fc.set_current_name(self.canvas.get_default_filename())
return fc
def save_figure(self, *args):
chooser = self.get_filechooser()
fname, format = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams['savefig.directory'])
# Save dir for next time, unless empty str (i.e., use cwd).
if startpath != "":
rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
self.canvas.figure.savefig(fname, format=format)
except Exception as e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int(toolfig.bbox.width)
h = int(toolfig.bbox.height)
window = Gtk.Window()
try:
window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = Gtk.Box()
vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True, 0)
window.show()
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
class FileChooserDialog(Gtk.FileChooserDialog):
"""GTK+ file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = Gtk.FileChooserAction.SAVE,
buttons = (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (Gtk.ResponseType.OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = Gtk.Box(spacing=10)
hbox.pack_start(Gtk.Label(label="File Format:"), False, False, 0)
liststore = Gtk.ListStore(GObject.TYPE_STRING)
cbox = Gtk.ComboBox() #liststore)
cbox.set_model(liststore)
cell = Gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start(cbox, False, False, 0)
self.filetypes = filetypes
self.sorted_filetypes = sorted(six.iteritems(filetypes))
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
liststore.append(["%s (*.%s)" % (name, ext)])
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(Gtk.ResponseType.OK):
break
filename = self.get_filename()
break
return filename, self.ext
class RubberbandGTK3(backend_tools.RubberbandBase):
def __init__(self, *args, **kwargs):
backend_tools.RubberbandBase.__init__(self, *args, **kwargs)
self.ctx = None
def draw_rubberband(self, x0, y0, x1, y1):
# 'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/
# Recipe/189744'
self.ctx = self.figure.canvas.get_property("window").cairo_create()
# todo: instead of redrawing the entire figure, copy the part of
# the figure that was covered by the previous rubberband rectangle
self.figure.canvas.draw()
height = self.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.ctx.new_path()
self.ctx.set_line_width(0.5)
self.ctx.rectangle(rect[0], rect[1], rect[2], rect[3])
self.ctx.set_source_rgb(0, 0, 0)
self.ctx.stroke()
class ToolbarGTK3(ToolContainerBase, Gtk.Box):
def __init__(self, toolmanager):
ToolContainerBase.__init__(self, toolmanager)
Gtk.Box.__init__(self)
self.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea = Gtk.Box()
self._toolarea.set_property('orientation', Gtk.Orientation.HORIZONTAL)
self.pack_start(self._toolarea, False, False, 0)
self._toolarea.show_all()
self._groups = {}
self._toolitems = {}
def add_toolitem(self, name, group, position, image_file, description,
toggle):
if toggle:
tbutton = Gtk.ToggleToolButton()
else:
tbutton = Gtk.ToolButton()
tbutton.set_label(name)
if image_file is not None:
image = Gtk.Image()
image.set_from_file(image_file)
tbutton.set_icon_widget(image)
if position is None:
position = -1
self._add_button(tbutton, group, position)
signal = tbutton.connect('clicked', self._call_tool, name)
tbutton.set_tooltip_text(description)
tbutton.show_all()
self._toolitems.setdefault(name, [])
self._toolitems[name].append((tbutton, signal))
def _add_button(self, button, group, position):
if group not in self._groups:
if self._groups:
self._add_separator()
toolbar = Gtk.Toolbar()
toolbar.set_style(Gtk.ToolbarStyle.ICONS)
self._toolarea.pack_start(toolbar, False, False, 0)
toolbar.show_all()
self._groups[group] = toolbar
self._groups[group].insert(button, position)
def _call_tool(self, btn, name):
self.trigger_tool(name)
def toggle_toolitem(self, name, toggled):
if name not in self._toolitems:
return
for toolitem, signal in self._toolitems[name]:
toolitem.handler_block(signal)
toolitem.set_active(toggled)
toolitem.handler_unblock(signal)
def remove_toolitem(self, name):
if name not in self._toolitems:
self.toolmanager.message_event('%s Not in toolbar' % name, self)
return
for group in self._groups:
for toolitem, _signal in self._toolitems[name]:
if toolitem in self._groups[group]:
self._groups[group].remove(toolitem)
del self._toolitems[name]
def _add_separator(self):
sep = Gtk.Separator()
sep.set_property("orientation", Gtk.Orientation.VERTICAL)
self._toolarea.pack_start(sep, False, True, 0)
sep.show_all()
class StatusbarGTK3(StatusbarBase, Gtk.Statusbar):
def __init__(self, *args, **kwargs):
StatusbarBase.__init__(self, *args, **kwargs)
Gtk.Statusbar.__init__(self)
self._context = self.get_context_id('message')
def set_message(self, s):
self.pop(self._context)
self.push(self._context, s)
class SaveFigureGTK3(backend_tools.SaveFigureBase):
def get_filechooser(self):
fc = FileChooserDialog(
title='Save the figure',
parent=self.figure.canvas.manager.window,
path=os.path.expanduser(rcParams['savefig.directory']),
filetypes=self.figure.canvas.get_supported_filetypes(),
default_filetype=self.figure.canvas.get_default_filetype())
fc.set_current_name(self.figure.canvas.get_default_filename())
return fc
def trigger(self, *args, **kwargs):
chooser = self.get_filechooser()
fname, format_ = chooser.get_filename_from_user()
chooser.destroy()
if fname:
startpath = os.path.expanduser(rcParams['savefig.directory'])
if startpath == '':
# explicitly missing key or empty str signals to use cwd
rcParams['savefig.directory'] = startpath
else:
# save dir for next time
rcParams['savefig.directory'] = os.path.dirname(
six.text_type(fname))
try:
self.figure.canvas.print_figure(fname, format=format_)
except Exception as e:
error_msg_gtk(str(e), parent=self)
class SetCursorGTK3(backend_tools.SetCursorBase):
def set_cursor(self, cursor):
self.figure.canvas.get_property("window").set_cursor(cursord[cursor])
class ConfigureSubplotsGTK3(backend_tools.ConfigureSubplotsBase, Gtk.Window):
def __init__(self, *args, **kwargs):
backend_tools.ConfigureSubplotsBase.__init__(self, *args, **kwargs)
self.window = None
def init_window(self):
if self.window:
return
self.window = Gtk.Window(title="Subplot Configuration Tool")
try:
self.window.window.set_icon_from_file(window_icon)
except (SystemExit, KeyboardInterrupt):
# re-raise exit type Exceptions
raise
except:
# we presumably already logged a message on the
# failure of the main plot, don't keep reporting
pass
self.vbox = Gtk.Box()
self.vbox.set_property("orientation", Gtk.Orientation.VERTICAL)
self.window.add(self.vbox)
self.vbox.show()
self.window.connect('destroy', self.destroy)
toolfig = Figure(figsize=(6, 3))
canvas = self.figure.canvas.__class__(toolfig)
toolfig.subplots_adjust(top=0.9)
SubplotTool(self.figure, toolfig)
w = int(toolfig.bbox.width)
h = int(toolfig.bbox.height)
self.window.set_default_size(w, h)
canvas.show()
self.vbox.pack_start(canvas, True, True, 0)
self.window.show()
def destroy(self, *args):
self.window.destroy()
self.window = None
def _get_canvas(self, fig):
return self.canvas.__class__(fig)
def trigger(self, sender, event, data=None):
self.init_window()
self.window.present()
# Define the file to use as the GTk icon
if sys.platform == 'win32':
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
window_icon = os.path.join(
matplotlib.rcParams['datapath'], 'images', icon_filename)
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel Gtk.Window
parent = parent.get_toplevel()
if not parent.is_toplevel():
parent = None
if not isinstance(msg, six.string_types):
msg = ','.join(map(str, msg))
dialog = Gtk.MessageDialog(
parent = parent,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK,
message_format = msg)
dialog.run()
dialog.destroy()
backend_tools.ToolSaveFigure = SaveFigureGTK3
backend_tools.ToolConfigureSubplots = ConfigureSubplotsGTK3
backend_tools.ToolSetCursor = SetCursorGTK3
backend_tools.ToolRubberband = RubberbandGTK3
Toolbar = ToolbarGTK3
@_Backend.export
class _BackendGTK3(_Backend):
FigureCanvas = FigureCanvasGTK3
FigureManager = FigureManagerGTK3
@staticmethod
def trigger_manager_draw(manager):
manager.canvas.draw_idle()
@staticmethod
def mainloop():
if Gtk.main_level() == 0:
Gtk.main()
| mit |
bowang/tensorflow | tensorflow/examples/learn/text_classification.py | 17 | 6649 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def estimator_spec_for_softmax_classification(
logits, labels, mode):
"""Returns EstimatorSpec instance for softmax classification."""
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def bag_of_words_model(features, labels, mode):
"""A bag-of-words model. Note it disregards the word order in the text."""
bow_column = tf.feature_column.categorical_column_with_identity(
WORDS_FEATURE, num_buckets=n_words)
bow_embedding_column = tf.feature_column.embedding_column(
bow_column, dimension=EMBEDDING_SIZE)
bow = tf.feature_column.input_layer(
features,
feature_columns=[bow_embedding_column])
logits = tf.layers.dense(bow, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
def rnn_model(features, labels, mode):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
# Split into list of embedding per word, while removing doc length dim.
# word_list is then a list of tensors, each of shape [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
# Given the encoding of the RNN, take the encoding of the last step (i.e. the
# final hidden state of the network) and pass it as features for softmax
# classification over the output classes.
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
return estimator_spec_for_softmax_classification(
logits=logits, labels=labels, mode=mode)
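# Rough shape walk-through of rnn_model for one batch (a sketch for orientation
# only; batch_size is whatever the input_fn provides):
#   features[WORDS_FEATURE]: [batch_size, MAX_DOCUMENT_LENGTH] integer word ids
#   word_vectors:            [batch_size, MAX_DOCUMENT_LENGTH, EMBEDDING_SIZE]
#   word_list:               MAX_DOCUMENT_LENGTH tensors of [batch_size, EMBEDDING_SIZE]
#   encoding:                [batch_size, EMBEDDING_SIZE] (final GRU state)
#   logits:                  [batch_size, MAX_LABEL]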
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.Series(dbpedia.train.data[:,1])
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.Series(dbpedia.test.data[:,1])
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
# Subtract 1 because VocabularyProcessor outputs a word-id matrix where word
# ids start from 1 and 0 means 'no word'. But
# categorical_column_with_identity assumes 0-based count and uses -1 for
# missing word.
x_train -= 1
x_test -= 1
model_fn = bag_of_words_model
classifier = tf.estimator.Estimator(model_fn=model_fn)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
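# Illustrative invocations (not part of the original example; the flag names
# match the argparse definitions above):
#   python text_classification.py                        # RNN model on DBpedia
#   python text_classification.py --bow_model            # bag-of-words model
#   python text_classification.py --test_with_fake_data  # quick smoke test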
| apache-2.0 |
kmike/scikit-learn | examples/svm/plot_svm_regression.py | 8 | 1431 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF
kernels.
"""
print(__doc__)
###############################################################################
# Generate sample data
import numpy as np
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
from sklearn.svm import SVR
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
import pylab as pl
pl.scatter(X, y, c='k', label='data')
pl.hold('on')
pl.plot(X, y_rbf, c='g', label='RBF model')
pl.plot(X, y_lin, c='r', label='Linear model')
pl.plot(X, y_poly, c='b', label='Polynomial model')
pl.xlabel('data')
pl.ylabel('target')
pl.title('Support Vector Regression')
pl.legend()
pl.show()
| bsd-3-clause |
harisbal/pandas | pandas/tests/indexes/timedeltas/test_ops.py | 1 | 14479 | from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import (
Series, Timedelta, TimedeltaIndex, Timestamp, timedelta_range,
to_timedelta
)
from pandas.core.dtypes.generic import ABCDateOffset
from pandas.tests.test_base import Ops
from pandas.tseries.offsets import Day, Hour
class TestTimedeltaIndexOps(Ops):
def setup_method(self, method):
super(TestTimedeltaIndexOps, self).setup_method(method)
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
f = lambda x: isinstance(x, TimedeltaIndex)
self.check_ops_properties(TimedeltaIndex._field_ops, f)
self.check_ops_properties(TimedeltaIndex._object_ops, f)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
assert idx1.is_monotonic
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timedelta('1 days')
assert idx.max() == Timedelta('3 days')
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
assert pd.isna(getattr(obj, op)())
obj = TimedeltaIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
assert np.min(td) == Timedelta('16815 days')
assert np.max(td) == Timedelta('16820 days')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, td, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, td, out=0)
assert np.argmin(td) == 0
assert np.argmax(td) == 5
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, td, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, td, out=0)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
assert idx[0] in idx
def test_unknown_attribute(self):
# see gh-9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
assert 'foo' not in ts.__dict__.keys()
pytest.raises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, idx[::-1])
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
@pytest.mark.parametrize('freq', ['D', '3D', '-3D',
'H', '2H', '-2H',
'T', '2T', 'S', '-3S'])
def test_infer_freq(self, freq):
# GH#11018
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_shift(self):
pass # handled in test_arithmetic.py
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_nat(self):
assert pd.TimedeltaIndex._na_value is pd.NaT
assert pd.TimedeltaIndex([])._na_value is pd.NaT
idx = pd.TimedeltaIndex(['1 days', '2 days'])
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert idx.hasnans is False
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans is True
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert idx.astype(object).equals(idx)
assert idx.astype(object).equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.astype(object))
assert not idx.astype(object).equals(idx2)
assert not idx.astype(object).equals(idx2.astype(object))
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
@pytest.mark.parametrize('values', [['0 days', '2 days', '4 days'], []])
@pytest.mark.parametrize('freq', ['2D', Day(2), '48H', Hour(48)])
def test_freq_setter(self, values, freq):
# GH 20678
idx = TimedeltaIndex(values)
# can set to an offset, converting from string if necessary
idx.freq = freq
assert idx.freq == freq
assert isinstance(idx.freq, ABCDateOffset)
# can reset to None
idx.freq = None
assert idx.freq is None
def test_freq_setter_errors(self):
# GH 20678
idx = TimedeltaIndex(['0 days', '2 days', '4 days'])
# setting with an incompatible freq
msg = ('Inferred frequency 2D from passed values does not conform to '
'passed frequency 5D')
with tm.assert_raises_regex(ValueError, msg):
idx.freq = '5D'
# setting with a non-fixed frequency
msg = r'<2 \* BusinessDays> is a non-fixed frequency'
with tm.assert_raises_regex(ValueError, msg):
idx.freq = '2B'
# setting with non-freq string
with tm.assert_raises_regex(ValueError, 'Invalid frequency'):
idx.freq = 'foo'
class TestTimedeltas(object):
def test_timedelta_ops(self):
# GH4984
# make sure ops return Timedelta
s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
for i in range(10)])
td = s.diff()
result = td.mean()
expected = to_timedelta(timedelta(seconds=9))
assert result == expected
result = td.to_frame().mean()
assert result[0] == expected
result = td.quantile(.1)
expected = Timedelta(np.timedelta64(2600, 'ms'))
assert result == expected
result = td.median()
expected = to_timedelta('00:00:09')
assert result == expected
result = td.to_frame().median()
assert result[0] == expected
# GH 6462
# consistency in returned values for sum
result = td.sum()
expected = to_timedelta('00:01:21')
assert result == expected
result = td.to_frame().sum()
assert result[0] == expected
# std
result = td.std()
expected = to_timedelta(Series(td.dropna().values).std())
assert result == expected
result = td.to_frame().std()
assert result[0] == expected
# invalid ops
for op in ['skew', 'kurt', 'sem', 'prod']:
pytest.raises(TypeError, getattr(td, op))
# GH 10040
# make sure NaT is properly handled by median()
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
assert s.diff().median() == timedelta(days=4)
s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
Timestamp('2015-02-15')])
assert s.diff().median() == timedelta(days=6)
| bsd-3-clause |
danieljwest/mycli | mycli/packages/tabulate.py | 16 | 38129 | # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from decimal import Decimal
from platform import python_version_tuple
from wcwidth import wcswidth
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
import io
def _is_file(f):
return isinstance(f, io.IOBase)
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.4"
MIN_PADDING = 2
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
def escape_char(c):
return LATEX_ESCAPE_RULES.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is _int_type or type(string) is _long_type or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
if isinstance(string, (bool, Decimal,)):
return _text_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
lwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return ' ' * lwidth + s
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
rwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return s + ' ' * rwidth
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
xwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
lwidth = xwidth // 2
rwidth = 0 if xwidth <= 0 else lwidth + xwidth % 2
return ' ' * lwidth + s + ' ' * rwidth
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return wcswidth(_strip_invisible(s))
else:
return wcswidth(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
maxwidth = max(max(map(width_fn, strings)), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
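# Returns the "more generic" of two cell types, e.g. _more_generic(int, float)
# is float and _more_generic(float, _text_type) is _text_type; types not listed
# in the table below are treated as the most generic (_text_type).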
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=""):
"""Format a value according to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
else: # it's a usual iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
def table_formats():
return _table_formats.keys()
def tabulate(tabular_data, headers=[], tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
\\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines)
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data. See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1f:s:",
["help", "header", "format=", "sep="])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
tablefmt = "simple"
sep = r"\s+"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt, sep=sep)
else:
with open(f) as fobj:
_pprint_file(fobj, headers=headers, tablefmt=tablefmt, sep=sep)
def _pprint_file(fobject, headers, tablefmt, sep):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows]
print(tabulate(table, headers, tablefmt))
if __name__ == "__main__":
_main()
| bsd-3-clause |
momenteg/python_scripts | hidden_supernova_search/read_and_plot_data_injected_in_Sndaq.py | 1 | 4011 | #!/usr/bin/python
import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
import seaborn
import subprocess
import os
import re
def mount_mogon():
print("mounting mogon via sshfs")
string_ = "sshfs -o nonempty dummy_user@dummy_address:/etapfs02/icecubehpc/gmoment/output_hidden_supernova/ mount_mogon/ "
os.system(string_)
def read_data(dummy_var):
print("reading data from: ", dummy_var )
list_ = glob.glob(dummy_var)
print("number of files: ", len(list_))
return list_
def filling_df_debug(a):
dataframe = pd.DataFrame()
df_from_each_file = (pd.read_table(file_, names=["distance","signi"], sep = " ") for file_ in a)
dataframe = pd.concat(df_from_each_file, ignore_index=True)
return dataframe
def filling_df_signal(a):
dataframe = pd.DataFrame()
dummy_df = pd.DataFrame()
b = (pd.read_table(file_, names=["first_row"], nrows=1, sep = " ") for file_ in a)
dummy_df = pd.concat(b, ignore_index=True)
df_from_each_file = (pd.read_table(file_, names=["signi", "multi"], skiprows=1, sep = " ") for file_ in a)
dataframe = pd.concat(df_from_each_file, ignore_index=True)
print("unique of the first rows: ", dummy_df.first_row.unique())
return dataframe
def plot_signi_vs_distance(signi_vs_dist_LL,signi_vs_dist_BH):
fig_signi_vs_distance = plt.figure(figsize=(15,10))
ax= fig_signi_vs_distance.add_subplot(1,1,1)
ax.plot(signi_vs_dist_LL.distance, signi_vs_dist_LL.signi, 'g.', label="LL 2012/0 NH + IH binning=all", alpha=0.5)
ax.plot(signi_vs_dist_BH.distance, signi_vs_dist_BH.signi, 'b.', label="BH 2011/2 NH + IH binning=all", alpha=0.5)
ax.set_title("LL+BH Distribution Signi vs Distance (2012-10) NH+IH - star distribution uniform")
ax.legend()
ax.set_yscale('log')
ax.set_xlabel('Distance [Kpc]')
ax.set_ylabel('Significance')
fig_signi_vs_distance.savefig('plots/signi_vs_dist_LL+BH.png')
def plot_significance(signal_LL, signal_BH):
norm_factor= 0.01
fig_signi = plt.figure(figsize=(15,10))
ax1= fig_signi.add_subplot(1,1,1)
ax1.hist(norm_factor*signal_LL[(norm_factor*signal_LL.signi) < 200].signi.values, bins =100, color="g", alpha=0.5, label="signi*{} LL".format(norm_factor));
ax1.hist(norm_factor*signal_BH[(norm_factor*signal_BH.signi) < 200].signi.values, bins =100, color="b", alpha=0.5, label="signi*{} BH".format(norm_factor));
ax1.set_yscale('log')
ax1.set_xlim(-10,200)
ax1.set_title("Distribution significance signal 2012-10 - star distribution uniform ")
ax1.set_xlabel('Significance')
ax1.legend(loc=2)
fig_signi.savefig('plots/significance.png')
def main():
list_signi_vs_distance_LL=[]
list_signi_vs_distance_BH = []
list_signal_LL = []
list_signal_BH = []
neutr_types=["HI","HN"]
binnings = ["500ms","1.0s", "1.5s", "4s", "10s","all"]
LL_signal= "mount_mogon/201*/signals/*LL*star_distr_0*"
LL_debug= "mount_mogon/201*/debug/*LL*star_distr_0*"
BH_signal= "mount_mogon/201*/signals/*BH*star_distr_0*"
BH_debug= "mount_mogon/201*/debug/*BH*star_distr_0*"
signi_vs_dist_LL = pd.DataFrame()
signal_LL = pd.DataFrame()
signi_vs_dist_BH = pd.DataFrame()
signal_BH = pd.DataFrame()
signi_vs_distance_debug = pd.DataFrame()
mount_mogon()
list_signi_vs_distance_LL = read_data(LL_debug)
list_signal_LL = read_data(LL_signal)
list_signi_vs_distance_BH = read_data(BH_debug)
list_signal_BH = read_data(BH_signal)
print("populating dataframes")
signi_vs_dist_LL = filling_df_debug(list_signi_vs_distance_LL)
signal_LL = filling_df_signal(list_signal_LL)
signi_vs_dist_BH = filling_df_debug(list_signi_vs_distance_BH)
signal_BH = filling_df_signal(list_signal_BH)
print(len(signi_vs_dist_LL.index))
print("start plotting")
plot_signi_vs_distance(signi_vs_dist_LL,signi_vs_dist_BH)
plot_significance(signal_LL,signal_BH)
print("Done.")
if __name__ == "__main__":
main()
| gpl-3.0 |
arjunkhode/ASP | lectures/07-Sinusoidal-plus-residual-model/plots-code/stochasticModelAnalSynth.py | 5 | 1619 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, resample
from scipy.fftpack import fft, ifft
import time
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import stochasticModel as STM
import stft as STFT
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hamming(512)
N = 512
H = 256
stocf = .1
mYst = STM.stochasticModelAnal(x, H, N, stocf)
y = STM.stochasticModelSynth(mYst, H, N)
mX, pX = STFT.stftAnal(x, w, N, H)
plt.figure(1, figsize=(9, 7))
plt.subplot(411)
plt.plot(np.arange(x.size)/float(fs), x,'b')
plt.title('x (ocean.wav)')
plt.axis([0,x.size/float(fs),min(x),max(x)])
plt.subplot(412)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(mX[0,:].size)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX; M=512, N=512, H=256')
plt.autoscale(tight=True)
plt.subplot(413)
numFrames = int(mYst[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(stocf*mX[0,:].size)*float(fs)/(stocf*N)
plt.pcolormesh(frmTime, binFreq, np.transpose(mYst))
plt.title('mY (stochastic approximation); stocf=.1')
plt.autoscale(tight=True)
plt.subplot(414)
plt.plot(np.arange(y.size)/float(fs), y,'b')
plt.title('y')
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.tight_layout()
plt.savefig('stochasticModelAnalSynth.png')
UF.wavwrite(y, fs, 'ocean-synthesis.wav')
plt.show()
| agpl-3.0 |
mikofski/pvlib-python | pvlib/iotools/bsrn.py | 3 | 6686 | """Functions to read data from the Baseline Surface Radiation Network (BSRN).
.. codeauthor:: Adam R. Jensen <adam-r-j@hotmail.com>
"""
import pandas as pd
import gzip
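# Character ranges of the fixed-width fields in one physical line of a BSRN
# LR0100 record; each logical record spans two such lines, which read_bsrn
# below stitches together into the columns named in BSRN_COLUMNS.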
COL_SPECS = [(0, 3), (4, 9), (10, 16), (16, 22), (22, 27), (27, 32), (32, 39),
(39, 45), (45, 50), (50, 55), (55, 64), (64, 70), (70, 75)]
BSRN_COLUMNS = ['day', 'minute',
'ghi', 'ghi_std', 'ghi_min', 'ghi_max',
'dni', 'dni_std', 'dni_min', 'dni_max',
'empty', 'empty', 'empty', 'empty', 'empty',
'dhi', 'dhi_std', 'dhi_min', 'dhi_max',
'lwd', 'lwd_std', 'lwd_min', 'lwd_max',
'temp_air', 'relative_humidity', 'pressure']
def read_bsrn(filename):
"""
Read a BSRN station-to-archive file into a DataFrame.
The BSRN (Baseline Surface Radiation Network) is a world wide network
of high-quality solar radiation monitoring stations as described in [1]_.
The function only parses the basic measurements (LR0100), which include
global, diffuse, direct and downwelling long-wave radiation [2]_. Future
updates may include parsing of additional data and meta-data.
BSRN files are freely available and can be accessed via FTP [3]_. Required
username and password are easily obtainable as described in the BSRN's
Data Release Guidelines [4]_.
Parameters
----------
filename: str
A relative or absolute file path.
Returns
-------
data: DataFrame
A DataFrame with the columns as described below. For a more extensive
description of the variables, consult [2]_.
Notes
-----
The data DataFrame includes the following fields:
======================= ====== ==========================================
Key Format Description
======================= ====== ==========================================
day int Day of the month 1-31
minute int Minute of the day 0-1439
ghi float Mean global horizontal irradiance [W/m^2]
ghi_std float Std. global horizontal irradiance [W/m^2]
ghi_min float Min. global horizontal irradiance [W/m^2]
ghi_max float Max. global horizontal irradiance [W/m^2]
dni float Mean direct normal irradiance [W/m^2]
dni_std float Std. direct normal irradiance [W/m^2]
dni_min float Min. direct normal irradiance [W/m^2]
dni_max float Max. direct normal irradiance [W/m^2]
dhi float Mean diffuse horizontal irradiance [W/m^2]
dhi_std float Std. diffuse horizontal irradiance [W/m^2]
dhi_min float Min. diffuse horizontal irradiance [W/m^2]
dhi_max float Max. diffuse horizontal irradiance [W/m^2]
lwd float Mean. downward long-wave radiation [W/m^2]
lwd_std float Std. downward long-wave radiation [W/m^2]
lwd_min float Min. downward long-wave radiation [W/m^2]
lwd_max float Max. downward long-wave radiation [W/m^2]
temp_air float Air temperature [°C]
relative_humidity float Relative humidity [%]
pressure float Atmospheric pressure [hPa]
======================= ====== ==========================================
References
----------
.. [1] `World Radiation Monitoring Center - Baseline Surface Radiation
Network (BSRN)
<https://bsrn.awi.de/>`_
.. [2] `Update of the Technical Plan for BSRN Data Management, 2013,
Global Climate Observing System (GCOS) GCOS-172.
<https://bsrn.awi.de/fileadmin/user_upload/bsrn.awi.de/Publications/gcos-174.pdf>`_
.. [3] `BSRN Data Retrieval via FTP
<https://bsrn.awi.de/data/data-retrieval-via-ftp/>`_
.. [4] `BSRN Data Release Guidelines
<https://bsrn.awi.de/data/conditions-of-data-release/>`_
"""
# Read file and store the starting line number for each logical record (LR)
line_no_dict = {}
if str(filename).endswith('.gz'): # check if file is a gzipped (.gz) file
open_func, mode = gzip.open, 'rt'
else:
open_func, mode = open, 'r'
with open_func(filename, mode) as f:
f.readline() # first line should be *U0001, so read it and discard
line_no_dict['0001'] = 0
date_line = f.readline() # second line contains the year and month
start_date = pd.Timestamp(year=int(date_line[7:11]),
month=int(date_line[3:6]), day=1,
tz='UTC') # BSRN timestamps are UTC
for num, line in enumerate(f, start=2):
if line.startswith('*'): # Find start of all logical records
line_no_dict[line[2:6]] = num # key is 4 digit LR number
# Determine start and end line of logical record LR0100 to be parsed
start_row = line_no_dict['0100'] + 1 # Start line number
# If LR0100 is the last logical record, then read rest of file
if start_row-1 == max(line_no_dict.values()):
end_row = num # then parse rest of the file
else: # otherwise parse until the beginning of the next logical record
end_row = min([i for i in line_no_dict.values() if i > start_row]) - 1
nrows = end_row-start_row+1
# Read file as a fixed width file (fwf)
data = pd.read_fwf(filename, skiprows=start_row, nrows=nrows, header=None,
colspecs=COL_SPECS, na_values=[-999.0, -99.9],
compression='infer')
# Create multi-index and unstack, resulting in one column for each variable
data = data.set_index([data.index // 2, data.index % 2])
data = data.unstack(level=1).swaplevel(i=0, j=1, axis='columns')
# Sort columns to match original order and assign column names
data = data.reindex(sorted(data.columns), axis='columns')
data.columns = BSRN_COLUMNS
# Drop empty columns
data = data.drop('empty', axis='columns')
# Change day and minute type to integer
data['day'] = data['day'].astype('Int64')
data['minute'] = data['minute'].astype('Int64')
# Set datetime index
data.index = (start_date
+ pd.to_timedelta(data['day']-1, unit='d')
+ pd.to_timedelta(data['minute'], unit='T'))
return data
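# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original pvlib module). The file
# name below is an assumption; any BSRN station-to-archive file containing an
# LR0100 record should work, gzipped or not.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    frame = read_bsrn('ilo0100.dat.gz')  # gzip handling is chosen from the .gz suffix
    print(frame[['ghi', 'dni', 'dhi']].head())  # minute-resolution irradiance columns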
| bsd-3-clause |
lbdreyer/iris | docs/iris/gallery_code/general/plot_inset.py | 3 | 2280 | """
Test Data Showing Inset Plots
=============================
This example demonstrates the use of a single 3D data cube with time, latitude
and longitude dimensions to plot a temperature series for a single latitude
coordinate, with an inset plot of the data region.
"""
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.quickplot as qplt
import iris.plot as iplt
def main():
cube1 = iris.load_cube(iris.sample_data_path("ostia_monthly.nc"))
# Slice into cube to retrieve data for the inset map showing the
# data region
region = cube1[-1, :, :]
# Average over latitude to reduce cube to 1 dimension
plot_line = region.collapsed("latitude", iris.analysis.MEAN)
# Open a window for plotting
fig = plt.figure()
# Add a single subplot (axes). Could also use "ax_main = plt.subplot()"
ax_main = fig.add_subplot(1, 1, 1)
# Produce a quick plot of the 1D cube
qplt.plot(plot_line)
# Set x limits to match the data
ax_main.set_xlim(0, plot_line.coord("longitude").points.max())
# Adjust the y limits so that the inset map won't clash with main plot
ax_main.set_ylim(294, 310)
ax_main.set_title("Meridional Mean Temperature")
# Add grid lines
ax_main.grid()
# Add a second set of axes specifying the fractional coordinates within
# the figure with bottom left corner at x=0.55, y=0.58 with width
# 0.3 and height 0.25.
# Also specify the projection
ax_sub = fig.add_axes(
[0.55, 0.58, 0.3, 0.25],
projection=ccrs.Mollweide(central_longitude=180),
)
# Use iris.plot (iplt) here so colour bar properties can be specified
# Also use a sequential colour scheme to reduce confusion for those with
# colour-blindness
iplt.pcolormesh(region, cmap="Blues")
# Manually set the orientation and tick marks on your colour bar
ticklist = np.linspace(np.min(region.data), np.max(region.data), 4)
plt.colorbar(orientation="horizontal", ticks=ticklist)
ax_sub.set_title("Data Region")
# Add coastlines
ax_sub.coastlines()
# request to show entire map, using the colour mesh on the data region only
ax_sub.set_global()
qplt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
hhuangmeso/cmaps | setup.py | 1 | 2866 | from glob import glob
from setuptools import setup
import os
VERSION = '1.0.3'
CMAPSFILE_DIR = os.path.join('./cmaps/colormaps')
def write_version_py(version=VERSION, filename='cmaps/_version.py'):
cnt = '# THIS FILE IS GENERATED FROM SETUP.PY\n' + \
'__version__ = "%(version)s"\n'
a = open(filename, 'w')
try:
a.write(cnt % {'version': version})
finally:
a.close()
def _listfname():
l = {}
l.update({'ncl': {
'p': 'os.path.join(CMAPSFILE_DIR, "ncar_ncl", ',
'l': sorted(glob(os.path.join(CMAPSFILE_DIR, 'ncar_ncl/*.rgb')))}})
l.update({'self_defined': {
'p': 'os.path.join(CMAPSFILE_DIR, "self_defined", ',
'l': sorted(glob(os.path.join(CMAPSFILE_DIR, 'self_defined/*.rgb')))}})
return l
def write_cmaps(template_file='./cmaps.template'):
with open(template_file, 'rt') as f:
c = f.read()
l = _listfname()
for t in l.keys():
for cmap_file in l[t]['l']:
cname = os.path.basename(cmap_file).split('.rgb')[0]
# a name starting with a digit would be an illegal attribute, so prefix it
if cname[0].isdigit() or cname.startswith('_'):
cname = 'N' + cname
if '-' in cname:
cname = cname.replace('-', '_')
if '+' in cname:
cname = cname.replace('+', '_')
c += ' @property\n'
c += ' def {}(self):\n'.format(cname)
c += ' cname = "{}"\n'.format(cname)
c += ' cmap_file = {} "{}")\n'.format(
l[t]['p'], os.path.basename(cmap_file))
c += ' cmap = Colormap(self._coltbl(cmap_file), name=cname)\n'
c += ' matplotlib.cm.register_cmap(name=cname, cmap=cmap)\n'
c += ' return cmap\n\n'
c += ' @property\n'
c += ' def {}(self):\n'.format(cname + '_r')
c += ' cname = "{}"\n'.format(cname + '_r')
c += ' cmap_file = {} "{}")\n'.format(
l[t]['p'], os.path.basename(cmap_file))
c += ' cmap = Colormap(self._coltbl(cmap_file)[::-1], name=cname)\n'
c += ' matplotlib.cm.register_cmap(name=cname, cmap=cmap)\n'
c += ' return cmap\n\n'
cmapspy = './cmaps/cmaps.py'
with open(cmapspy, 'wt') as fw:
fw.write(c)
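# Illustrative sketch (added for clarity, not part of the original setup.py) of
# the code that write_cmaps() appends to cmaps/cmaps.py for a hypothetical
# colormap file "amwg.rgb" under colormaps/ncar_ncl/; the name is an assumption:
#
#     @property
#     def amwg(self):
#         cname = "amwg"
#         cmap_file = os.path.join(CMAPSFILE_DIR, "ncar_ncl", "amwg.rgb")
#         cmap = Colormap(self._coltbl(cmap_file), name=cname)
#         matplotlib.cm.register_cmap(name=cname, cmap=cmap)
#         return cmap
#
# plus a matching "amwg_r" property built from the reversed colour table.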
write_version_py()
write_cmaps()
setup(
name='cmaps',
author='Hao Huang',
version=VERSION,
author_email='hhuangwx@gmail.com',
packages=['cmaps', ],
package_data={'cmaps': ['colormaps/ncar_ncl/*',
'colormaps/self_defined/*'], },
data_files=[('', ['cmaps.template', 'LICENSE']),],
url='',
license='LICENSE',
description='',
long_description='',
install_requires=['matplotlib', 'numpy'],
)
| gpl-3.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/core/ops.py | 9 | 48430 | """
Arithmetic operations for PandasObjects
This is not a public API.
"""
# necessary to enforce truediv in Python 2.X
from __future__ import division
import operator
import warnings
import numpy as np
import pandas as pd
import datetime
from pandas import compat, lib, tslib
import pandas.index as _index
from pandas.util.decorators import Appender
import pandas.core.common as com
import pandas.computation.expressions as expressions
from pandas.lib import isscalar
from pandas.tslib import iNaT
from pandas.compat import bind_method
from pandas.core.common import(is_list_like, notnull, isnull,
_values_from_object, _maybe_match_name,
needs_i8_conversion, is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype, is_object_dtype,
is_timedelta64_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_bool_dtype)
from pandas.io.common import PerformanceWarning
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
def _create_methods(arith_method, radd_func, comp_method, bool_method,
use_numexpr, special=False, default_axis='columns'):
# creates actual methods based upon arithmetic, comp and bool method
# constructors.
# NOTE: Only frame cares about default_axis, specifically: special methods
# have default axis None, whereas flex methods have default axis 'columns'
# if we're not using numexpr, then don't pass a str_rep
if use_numexpr:
op = lambda x: x
else:
op = lambda x: None
if special:
def names(x):
if x[-1] == "_":
return "__%s_" % x
else:
return "__%s__" % x
else:
names = lambda x: x
radd_func = radd_func or operator.add
# In frame, all special methods have default_axis=None, flex methods have
# default_axis set to the default (columns)
new_methods = dict(
add=arith_method(operator.add, names('add'), op('+'),
default_axis=default_axis),
radd=arith_method(radd_func, names('radd'), op('+'),
default_axis=default_axis),
sub=arith_method(operator.sub, names('sub'), op('-'),
default_axis=default_axis),
mul=arith_method(operator.mul, names('mul'), op('*'),
default_axis=default_axis),
truediv=arith_method(operator.truediv, names('truediv'), op('/'),
truediv=True, fill_zeros=np.inf,
default_axis=default_axis),
floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf),
# Causes a floating point exception in the tests when numexpr is
# enabled, so for now no speedup
mod=arith_method(operator.mod, names('mod'), None,
default_axis=default_axis, fill_zeros=np.nan),
pow=arith_method(operator.pow, names('pow'), op('**'),
default_axis=default_axis),
# not entirely sure why this is necessary, but previously was included
# so it's here to maintain compatibility
rmul=arith_method(operator.mul, names('rmul'), op('*'),
default_axis=default_axis, reversed=True),
rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
default_axis=default_axis, reversed=True),
rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
names('rtruediv'), op('/'), truediv=True,
fill_zeros=np.inf, default_axis=default_axis,
reversed=True),
rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
names('rfloordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf,
reversed=True),
rpow=arith_method(lambda x, y: y ** x, names('rpow'), op('**'),
default_axis=default_axis, reversed=True),
rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
default_axis=default_axis, fill_zeros=np.nan,
reversed=True),
)
new_methods['div'] = new_methods['truediv']
new_methods['rdiv'] = new_methods['rtruediv']
# Comp methods never had a default axis set
if comp_method:
new_methods.update(dict(
eq=comp_method(operator.eq, names('eq'), op('==')),
ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),
lt=comp_method(operator.lt, names('lt'), op('<')),
gt=comp_method(operator.gt, names('gt'), op('>')),
le=comp_method(operator.le, names('le'), op('<=')),
ge=comp_method(operator.ge, names('ge'), op('>=')),
))
if bool_method:
new_methods.update(dict(
and_=bool_method(operator.and_, names('and_'), op('&')),
or_=bool_method(operator.or_, names('or_'), op('|')),
# For some reason ``^`` wasn't used in original.
xor=bool_method(operator.xor, names('xor'), op('^')),
rand_=bool_method(lambda x, y: operator.and_(y, x),
names('rand_'), op('&')),
ror_=bool_method(lambda x, y: operator.or_(y, x), names('ror_'), op('|')),
rxor=bool_method(lambda x, y: operator.xor(y, x), names('rxor'), op('^'))
))
new_methods = dict((names(k), v) for k, v in new_methods.items())
return new_methods
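# Sketch of the resulting naming (added for clarity, not part of pandas): with
# special=True, names('add') -> '__add__', so the returned dict maps dunder
# names such as '__add__', '__radd__', '__truediv__', '__eq__' to the generated
# wrappers; with special=False the plain flex names ('add', 'radd', ...) are
# used instead.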
def add_methods(cls, new_methods, force, select, exclude):
if select and exclude:
raise TypeError("May only pass either select or exclude")
methods = new_methods
if select:
select = set(select)
methods = {}
for key, method in new_methods.items():
if key in select:
methods[key] = method
if exclude:
for k in exclude:
new_methods.pop(k, None)
for name, method in new_methods.items():
if force or name not in cls.__dict__:
bind_method(cls, name, method)
#----------------------------------------------------------------------
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None, radd_func=None,
comp_method=None, bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
Parameters
----------
arith_method : function (optional)
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
radd_func : function (optional)
Possible replacement for ``operator.add`` for compatibility
comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining it; if True, always defines functions on the class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
"""
radd_func = radd_func or operator.add
# in frame, special methods have default_axis = None, comp methods use
# 'columns'
new_methods = _create_methods(arith_method, radd_func, comp_method,
bool_method, use_numexpr, default_axis=None,
special=True)
# inplace operators (I feel like these should get passed an `inplace=True`
# or just be removed)
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(result.reindex_like(self,copy=False)._data,
verify_is_copy=False)
return self
return f
new_methods.update(dict(
__iadd__=_wrap_inplace_method(new_methods["__add__"]),
__isub__=_wrap_inplace_method(new_methods["__sub__"]),
__imul__=_wrap_inplace_method(new_methods["__mul__"]),
__itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
__ipow__=_wrap_inplace_method(new_methods["__pow__"]),
))
if not compat.PY3:
new_methods["__idiv__"] = new_methods["__div__"]
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
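# Illustrative wiring sketch (not part of pandas itself): a Series-like class
# would typically be hooked up with the factory dict ``series_special_funcs``
# defined later in this module, e.g.
#
#     add_special_arithmetic_methods(MySeries, **series_special_funcs)
#
# after which ``MySeries.__add__``, ``__sub__`` etc. exist, and the in-place
# variants (``__iadd__``, ``__isub__``, ...) wrap them as defined above.
# ``MySeries`` is a hypothetical name used only for this sketch.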
def add_flex_arithmetic_methods(cls, flex_arith_method, radd_func=None,
flex_comp_method=None, flex_bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None):
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
Parameters
----------
flex_arith_method : function
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
radd_func : function (optional)
Possible replacement for ``lambda x, y: operator.add(y, x)`` for
compatibility
flex_comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining it; if True, always defines functions on the class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
"""
radd_func = radd_func or (lambda x, y: operator.add(y, x))
# in frame, default axis is 'columns', doesn't matter for series and panel
new_methods = _create_methods(
flex_arith_method, radd_func, flex_comp_method, flex_bool_method,
use_numexpr, default_axis='columns', special=False)
new_methods.update(dict(
multiply=new_methods['mul'],
subtract=new_methods['sub'],
divide=new_methods['div']
))
# opt out of bool flex methods for now
for k in ('ror_', 'rxor', 'rand_'):
if k in new_methods:
new_methods.pop(k)
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
class _TimeOp(object):
"""
Wrapper around Series datetime/time/timedelta arithmetic operations.
Generally, you should use classmethod ``maybe_convert_for_time_op`` as an
entry point.
"""
fill_value = iNaT
wrap_results = staticmethod(lambda x: x)
dtype = None
def __init__(self, left, right, name, na_op):
# need to make sure that we are aligning the data
if isinstance(left, pd.Series) and isinstance(right, pd.Series):
left, right = left.align(right,copy=False)
lvalues = self._convert_to_array(left, name=name)
rvalues = self._convert_to_array(right, name=name, other=lvalues)
self.name = name
self.na_op = na_op
# left
self.left = left
self.is_offset_lhs = self._is_offset(left)
self.is_timedelta_lhs = is_timedelta64_dtype(lvalues)
self.is_datetime64_lhs = is_datetime64_dtype(lvalues)
self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues)
self.is_datetime_lhs = self.is_datetime64_lhs or self.is_datetime64tz_lhs
self.is_integer_lhs = left.dtype.kind in ['i', 'u']
# right
self.right = right
self.is_offset_rhs = self._is_offset(right)
self.is_datetime64_rhs = is_datetime64_dtype(rvalues)
self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues)
self.is_datetime_rhs = self.is_datetime64_rhs or self.is_datetime64tz_rhs
self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
self._validate(lvalues, rvalues, name)
self.lvalues, self.rvalues = self._convert_for_datetime(lvalues, rvalues)
def _validate(self, lvalues, rvalues, name):
# timedelta and integer mul/div
if (self.is_timedelta_lhs and self.is_integer_rhs) or (
self.is_integer_lhs and self.is_timedelta_rhs):
if name not in ('__div__', '__truediv__', '__mul__'):
raise TypeError("can only operate on a timedelta and an "
"integer for division, but the operator [%s]"
"was passed" % name)
# 2 datetimes
elif self.is_datetime_lhs and self.is_datetime_rhs:
if name not in ('__sub__','__rsub__'):
raise TypeError("can only operate on a datetimes for"
" subtraction, but the operator [%s] was"
" passed" % name)
# if tz's must be equal (same or None)
if getattr(lvalues,'tz',None) != getattr(rvalues,'tz',None):
raise ValueError("Incompatbile tz's on datetime subtraction ops")
# 2 timedeltas
elif ((self.is_timedelta_lhs and
(self.is_timedelta_rhs or self.is_offset_rhs)) or
(self.is_timedelta_rhs and
(self.is_timedelta_lhs or self.is_offset_lhs))):
if name not in ('__div__', '__rdiv__', '__truediv__', '__rtruediv__',
'__add__', '__radd__', '__sub__', '__rsub__'):
raise TypeError("can only operate on a timedeltas for "
"addition, subtraction, and division, but the"
" operator [%s] was passed" % name)
# datetime and timedelta/DateOffset
elif (self.is_datetime_lhs and
(self.is_timedelta_rhs or self.is_offset_rhs)):
if name not in ('__add__', '__radd__', '__sub__'):
raise TypeError("can only operate on a datetime with a rhs of"
" a timedelta/DateOffset for addition and subtraction,"
" but the operator [%s] was passed" %
name)
elif ((self.is_timedelta_lhs or self.is_offset_lhs)
and self.is_datetime_rhs):
if name not in ('__add__', '__radd__'):
raise TypeError("can only operate on a timedelta/DateOffset and"
" a datetime for addition, but the operator"
" [%s] was passed" % name)
else:
raise TypeError('cannot operate on a series without a rhs '
'of a series/ndarray of type datetime64[ns] '
'or a timedelta')
def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.tseries.timedeltas import to_timedelta
ovalues = values
if not is_list_like(values):
values = np.array([values])
inferred_type = lib.infer_dtype(values)
if inferred_type in ('datetime64', 'datetime', 'date', 'time'):
# if we have an other of timedelta, but use pd.NaT here,
# we are in the wrong path
if (other is not None and other.dtype == 'timedelta64[ns]' and
all(isnull(v) for v in values)):
values = np.empty(values.shape, dtype=other.dtype)
values[:] = iNaT
# a datelike
elif isinstance(values, pd.DatetimeIndex):
values = values.to_series()
# datetime with tz
elif isinstance(ovalues, datetime.datetime) and hasattr(ovalues,'tz'):
values = pd.DatetimeIndex(values)
# datetime array with tz
elif com.is_datetimetz(values):
if isinstance(values, pd.Series):
values = values._values
elif not (isinstance(values, (np.ndarray, pd.Series)) and
is_datetime64_dtype(values)):
values = tslib.array_to_datetime(values)
elif inferred_type in ('timedelta', 'timedelta64'):
# have a timedelta, convert to ns here
values = to_timedelta(values, errors='coerce')
elif inferred_type == 'integer':
# py3 compat where dtype is 'm' but is an integer
if values.dtype.kind == 'm':
values = values.astype('timedelta64[ns]')
elif isinstance(values, pd.PeriodIndex):
values = values.to_timestamp().to_series()
elif name not in ('__truediv__', '__div__', '__mul__'):
raise TypeError("incompatible type for a datetime/timedelta "
"operation [{0}]".format(name))
elif inferred_type == 'floating':
# all nan, so ok, use the other dtype (e.g. timedelta or datetime)
if isnull(values).all():
values = np.empty(values.shape, dtype=other.dtype)
values[:] = iNaT
else:
raise TypeError(
'incompatible type [{0}] for a datetime/timedelta '
'operation'.format(np.array(values).dtype))
elif self._is_offset(values):
return values
else:
raise TypeError("incompatible type [{0}] for a datetime/timedelta"
" operation".format(np.array(values).dtype))
return values
def _convert_for_datetime(self, lvalues, rvalues):
from pandas.tseries.timedeltas import to_timedelta
mask = isnull(lvalues) | isnull(rvalues)
# datetimes require views
if self.is_datetime_lhs or self.is_datetime_rhs:
# datetime subtraction means timedelta
if self.is_datetime_lhs and self.is_datetime_rhs:
self.dtype = 'timedelta64[ns]'
elif self.is_datetime64tz_lhs:
self.dtype = lvalues.dtype
elif self.is_datetime64tz_rhs:
self.dtype = rvalues.dtype
else:
self.dtype = 'datetime64[ns]'
# if adding single offset try vectorized path
# in DatetimeIndex; otherwise elementwise apply
def _offset(lvalues, rvalues):
if len(lvalues) == 1:
rvalues = pd.DatetimeIndex(rvalues)
lvalues = lvalues[0]
else:
warnings.warn("Adding/subtracting array of DateOffsets to Series not vectorized",
PerformanceWarning)
rvalues = rvalues.astype('O')
# pass thru on the na_op
self.na_op = lambda x, y: getattr(x,self.name)(y)
return lvalues, rvalues
if self.is_offset_lhs:
lvalues, rvalues = _offset(lvalues, rvalues)
elif self.is_offset_rhs:
rvalues, lvalues = _offset(rvalues, lvalues)
else:
# with tz, convert to UTC
if self.is_datetime64tz_lhs:
lvalues = lvalues.tz_localize(None)
if self.is_datetime64tz_rhs:
rvalues = rvalues.tz_localize(None)
lvalues = lvalues.view(np.int64)
rvalues = rvalues.view(np.int64)
# otherwise it's a timedelta
else:
self.dtype = 'timedelta64[ns]'
# convert Tick DateOffset to underlying delta
if self.is_offset_lhs:
lvalues = to_timedelta(lvalues)
if self.is_offset_rhs:
rvalues = to_timedelta(rvalues)
lvalues = lvalues.astype(np.int64)
rvalues = rvalues.astype(np.int64)
# time delta division -> unitless
# integer gets converted to timedelta in np < 1.6
if (self.is_timedelta_lhs and self.is_timedelta_rhs) and\
not self.is_integer_rhs and\
not self.is_integer_lhs and\
self.name in ('__div__', '__truediv__'):
self.dtype = 'float64'
self.fill_value = np.nan
lvalues = lvalues.astype(np.float64)
rvalues = rvalues.astype(np.float64)
# if we need to mask the results
if mask.any():
def f(x):
# datetime64[ns]/timedelta64[ns] masking
try:
x = np.array(x, dtype=self.dtype)
except TypeError:
x = np.array(x, dtype='datetime64[ns]')
np.putmask(x, mask, self.fill_value)
return x
self.wrap_results = f
return lvalues, rvalues
def _is_offset(self, arr_or_obj):
""" check if obj or all elements of list-like is DateOffset """
if isinstance(arr_or_obj, pd.DateOffset):
return True
elif is_list_like(arr_or_obj):
return all(isinstance(x, pd.DateOffset) for x in arr_or_obj)
else:
return False
@classmethod
def maybe_convert_for_time_op(cls, left, right, name, na_op):
"""
if ``left`` and ``right`` are appropriate for datetime arithmetic with
operation ``name``, processes them and returns a ``_TimeOp`` object
that stores all the required values. Otherwise, it will generate
either a ``NotImplementedError`` or ``None``, indicating that the
operation is unsupported for datetimes (e.g., an unsupported r_op) or
that the data is not the right type for time ops.
"""
# decide if we can do it
is_timedelta_lhs = is_timedelta64_dtype(left)
is_datetime_lhs = is_datetime64_dtype(left) or is_datetime64tz_dtype(left)
if not (is_datetime_lhs or is_timedelta_lhs):
return None
return cls(left, right, name, na_op)
def _arith_method_SERIES(op, name, str_rep, fill_zeros=None,
default_axis=None, **eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
if isinstance(y, (np.ndarray, pd.Series, pd.Index)):
dtype = np.find_common_type([x.dtype, y.dtype], [])
result = np.empty(x.size, dtype=dtype)
mask = notnull(x) & notnull(y)
result[mask] = op(x[mask], _values_from_object(y[mask]))
elif isinstance(x, np.ndarray):
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
else:
raise TypeError("{typ} cannot perform the operation {op}".format(typ=type(x).__name__,op=str_rep))
result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
def wrapper(left, right, name=name, na_op=na_op):
if isinstance(right, pd.DataFrame):
return NotImplemented
time_converted = _TimeOp.maybe_convert_for_time_op(left, right, name, na_op)
if time_converted is None:
lvalues, rvalues = left, right
dtype = None
wrap_results = lambda x: x
elif time_converted == NotImplemented:
return NotImplemented
else:
left, right = time_converted.left, time_converted.right
lvalues, rvalues = time_converted.lvalues, time_converted.rvalues
dtype = time_converted.dtype
wrap_results = time_converted.wrap_results
na_op = time_converted.na_op
if isinstance(rvalues, pd.Series):
rindex = getattr(rvalues,'index',rvalues)
name = _maybe_match_name(left, rvalues)
lvalues = getattr(lvalues, 'values', lvalues)
rvalues = getattr(rvalues, 'values', rvalues)
if left.index.equals(rindex):
index = left.index
else:
index, lidx, ridx = left.index.join(rindex, how='outer',
return_indexers=True)
if lidx is not None:
lvalues = com.take_1d(lvalues, lidx)
if ridx is not None:
rvalues = com.take_1d(rvalues, ridx)
arr = na_op(lvalues, rvalues)
return left._constructor(wrap_results(arr), index=index,
name=name, dtype=dtype)
else:
# scalars
if hasattr(lvalues, 'values') and not isinstance(lvalues, pd.DatetimeIndex):
lvalues = lvalues.values
return left._constructor(wrap_results(na_op(lvalues, rvalues)),
index=left.index, name=left.name,
dtype=dtype)
return wrapper
def _comp_method_SERIES(op, name, str_rep, masker=False):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
# dispatch to the categorical if we have a categorical
# in either operand
if is_categorical_dtype(x):
return op(x,y)
elif is_categorical_dtype(y) and not isscalar(y):
return op(y,x)
if is_object_dtype(x.dtype):
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
if not is_object_dtype(y.dtype):
result = lib.vec_compare(x, y.astype(np.object_), op)
else:
result = lib.vec_compare(x, y, op)
else:
result = lib.scalar_compare(x, y, op)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
if is_datetimelike_v_numeric(x, y):
raise TypeError("invalid type comparison")
# numpy does not like comparisons vs None
if isscalar(y) and isnull(y):
if name == '__ne__':
return np.ones(len(x), dtype=bool)
else:
return np.zeros(len(x), dtype=bool)
# we have a datetime/timedelta and may need to convert
mask = None
if needs_i8_conversion(x) or (not isscalar(y) and needs_i8_conversion(y)):
if isscalar(y):
y = _index.convert_scalar(x,_values_from_object(y))
else:
y = y.view('i8')
mask = isnull(x)
x = x.view('i8')
try:
result = getattr(x, name)(y)
if result is NotImplemented:
raise TypeError("invalid type comparison")
except AttributeError:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, pd.Series):
name = _maybe_match_name(self, other)
if len(self) != len(other):
raise ValueError('Series lengths must match to compare')
return self._constructor(na_op(self.values, other.values),
index=self.index, name=name)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, (np.ndarray, pd.Index)):
if len(self) != len(other):
raise ValueError('Lengths must match to compare')
return self._constructor(na_op(self.values, np.asarray(other)),
index=self.index).__finalize__(self)
elif isinstance(other, pd.Categorical):
if not is_categorical_dtype(self):
msg = "Cannot compare a Categorical for op {op} with Series of dtype {typ}.\n"\
"If you want to compare values, use 'series <op> np.asarray(other)'."
raise TypeError(msg.format(op=op,typ=self.dtype))
if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray, which would then
# not take categories ordering into account
# we can go directly to op, as the na_op would just test again and dispatch to it.
res = op(self.values, other)
else:
values = self.get_values()
if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
res = na_op(values, other)
if isscalar(res):
raise TypeError('Could not compare %s type with Series'
% type(other))
# always return a full value series here
res = _values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name,
dtype='bool')
return res
return wrapper
def _bool_method_SERIES(op, name, str_rep):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, pd.Series)):
if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
result = op(x, y) # when would this be hit?
else:
x = com._ensure_object(x)
y = com._ensure_object(y)
result = lib.vec_binop(x, y, op)
else:
try:
# let null fall thru
if not isnull(y):
y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
raise TypeError("cannot compare a dtyped [{0}] array with "
"a scalar of type [{1}]".format(
x.dtype, type(y).__name__))
return result
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
if isinstance(other, pd.Series):
name = _maybe_match_name(self, other)
other = other.reindex_like(self)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool
return filler(self._constructor(na_op(self.values, other.values),
index=self.index,
name=name))
elif isinstance(other, pd.DataFrame):
return NotImplemented
else:
# scalars, list, tuple, np.array
filler = fill_int if is_self_int_dtype and is_integer_dtype(np.asarray(other)) else fill_bool
return filler(self._constructor(na_op(self.values, other),
index=self.index)).__finalize__(self)
return wrapper
def _radd_compat(left, right):
radd = lambda x, y: y + x
# GH #353, NumPy 1.5.1 workaround
try:
output = radd(left, right)
except TypeError:
raise
return output
_op_descriptions = {'add': {'op': '+', 'desc': 'Addition', 'reversed': False, 'reverse': 'radd'},
'sub': {'op': '-', 'desc': 'Subtraction', 'reversed': False, 'reverse': 'rsub'},
'mul': {'op': '*', 'desc': 'Multiplication', 'reversed': False, 'reverse': 'rmul'},
'mod': {'op': '%', 'desc': 'Modulo', 'reversed': False, 'reverse': 'rmod'},
'pow': {'op': '**', 'desc': 'Exponential power', 'reversed': False, 'reverse': 'rpow'},
'truediv': {'op': '/', 'desc': 'Floating division', 'reversed': False, 'reverse': 'rtruediv'},
'floordiv': {'op': '//', 'desc': 'Integer division', 'reversed': False, 'reverse': 'rfloordiv'}}
_op_names = list(_op_descriptions.keys())
for k in _op_names:
reverse_op = _op_descriptions[k]['reverse']
_op_descriptions[reverse_op] = _op_descriptions[k].copy()
_op_descriptions[reverse_op]['reversed'] = True
_op_descriptions[reverse_op]['reverse'] = k
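# After the loop above, each reverse operator gets a mirrored entry; for
# example (sketch, values follow directly from the copy/update above):
#   _op_descriptions['radd'] == {'op': '+', 'desc': 'Addition',
#                                'reversed': True, 'reverse': 'add'}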
def _flex_method_SERIES(op, name, str_rep, default_axis=None,
fill_zeros=None, **eval_kwargs):
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' series'
else:
equiv = 'series ' + op_desc['op'] + ' other'
doc = """
%s of series and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series or scalar value
fill_value : None or float value, default None (NaN)
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
result : Series
See also
--------
Series.%s
""" % (op_desc['desc'], op_name, equiv, op_desc['reverse'])
@Appender(doc)
def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# validate axis
self._get_axis_number(axis)
if isinstance(other, pd.Series):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, pd.Series, list, tuple)):
if len(other) != len(self):
raise ValueError('Lengths must be equal')
return self._binop(self._constructor(other, self.index), op,
level=level, fill_value=fill_value)
else:
return self._constructor(op(self.values, other),
self.index).__finalize__(self)
flex_wrapper.__name__ = name
return flex_wrapper
series_flex_funcs = dict(flex_arith_method=_flex_method_SERIES,
radd_func=_radd_compat,
flex_comp_method=_comp_method_SERIES)
series_special_funcs = dict(arith_method=_arith_method_SERIES,
radd_func=_radd_compat,
comp_method=_comp_method_SERIES,
bool_method=_bool_method_SERIES)
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame locations are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
"""
def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns',
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(
op, str_rep, x, y, raise_on_error=True, **eval_kwargs)
except TypeError:
xrav = x.ravel()
if isinstance(y, (np.ndarray, pd.Series)):
dtype = np.find_common_type([x.dtype, y.dtype], [])
result = np.empty(x.size, dtype=dtype)
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
xrav = xrav[mask]
yrav = yrav[mask]
if np.prod(xrav.shape) and np.prod(yrav.shape):
result[mask] = op(xrav, yrav)
elif hasattr(x,'size'):
result = np.empty(x.size, dtype=x.dtype)
mask = notnull(xrav)
xrav = xrav[mask]
if np.prod(xrav.shape):
result[mask] = op(xrav, y)
else:
raise TypeError("cannot perform operation {op} between objects "
"of type {x} and {y}".format(op=name,x=type(x),y=type(y)))
result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape)
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' dataframe'
else:
equiv = 'dataframe ' + op_desc['op'] + ' other'
doc = """
%s of dataframe and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame locations are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
See also
--------
DataFrame.%s
""" % (op_desc['desc'], op_name, equiv, op_desc['reverse'])
else:
doc = _arith_doc_FRAME % name
@Appender(doc)
def f(self, other, axis=default_axis, level=None, fill_value=None):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._combine_frame(other, na_op, fill_value, level)
elif isinstance(other, pd.Series):
return self._combine_series(other, na_op, fill_value, axis, level)
elif isinstance(other, (list, tuple)):
if axis is not None and self._get_axis_name(axis) == 'index':
# TODO: Get all of these to use _constructor_sliced
# casted = self._constructor_sliced(other, index=self.index)
casted = pd.Series(other, index=self.index)
else:
# casted = self._constructor_sliced(other, index=self.columns)
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, fill_value, axis, level)
elif isinstance(other, np.ndarray) and other.ndim: # skips np scalar
if other.ndim == 1:
if axis is not None and self._get_axis_name(axis) == 'index':
# casted = self._constructor_sliced(other,
# index=self.index)
casted = pd.Series(other, index=self.index)
else:
# casted = self._constructor_sliced(other,
# index=self.columns)
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, fill_value,
axis, level)
elif other.ndim == 2:
# casted = self._constructor(other, index=self.index,
# columns=self.columns)
casted = pd.DataFrame(other, index=self.index,
columns=self.columns)
return self._combine_frame(casted, na_op, fill_value, level)
else:
raise ValueError("Incompatible argument shape: %s" %
(other.shape, ))
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
# Masker unused for now
def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns',
masker=False):
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=x.dtype)
if isinstance(y, (np.ndarray, pd.Series)):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for flexible comparison methods %s' % name)
def f(self, other, axis=default_axis, level=None):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._flex_compare_frame(other, na_op, str_rep, level)
elif isinstance(other, pd.Series):
return self._combine_series(other, na_op, None, axis, level)
elif isinstance(other, (list, tuple)):
if axis is not None and self._get_axis_name(axis) == 'index':
casted = pd.Series(other, index=self.index)
else:
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, None, axis, level)
elif isinstance(other, np.ndarray):
if other.ndim == 1:
if axis is not None and self._get_axis_name(axis) == 'index':
casted = pd.Series(other, index=self.index)
else:
casted = pd.Series(other, index=self.columns)
return self._combine_series(casted, na_op, None, axis, level)
elif other.ndim == 2:
casted = pd.DataFrame(other, index=self.index,
columns=self.columns)
return self._flex_compare_frame(casted, na_op, str_rep, level)
else:
raise ValueError("Incompatible argument shape: %s" %
(other.shape, ))
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
def _comp_method_FRAME(func, name, str_rep, masker=False):
@Appender('Wrapper for comparison method %s' % name)
def f(self, other):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._compare_frame(other, func, str_rep)
elif isinstance(other, pd.Series):
return self._combine_series_infer(other, func)
else:
# straight boolean comparisons: we want to allow all columns
# (regardless of dtype) to pass thru. See #4537 for discussion.
res = self._combine_const(other, func, raise_on_error=False)
return res.fillna(True).astype(bool)
f.__name__ = name
return f
frame_flex_funcs = dict(flex_arith_method=_arith_method_FRAME,
radd_func=_radd_compat,
flex_comp_method=_flex_comp_method_FRAME)
frame_special_funcs = dict(arith_method=_arith_method_FRAME,
radd_func=_radd_compat,
comp_method=_comp_method_FRAME,
bool_method=_arith_method_FRAME)
def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None,
default_axis=None, **eval_kwargs):
# copied from Series na_op above, but without unnecessary branch for
# non-scalar
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
# TODO: might need to find_common_type here?
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
result, changed = com._maybe_upcast_putmask(result, ~mask, np.nan)
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
# work only for scalars
def f(self, other):
if not isscalar(other):
raise ValueError('Simple arithmetic with %s can only be '
'done with scalar values' %
self._constructor.__name__)
return self._combine(other, op)
f.__name__ = name
return f
def _comp_method_PANEL(op, name, str_rep=None, masker=False):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, np.ndarray):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for comparison method %s' % name)
def f(self, other):
if isinstance(other, self._constructor):
return self._compare_constructor(other, na_op)
elif isinstance(other, (self._constructor_sliced, pd.DataFrame,
pd.Series)):
raise Exception("input needs alignment for this object [%s]" %
self._constructor)
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
panel_special_funcs = dict(arith_method=_arith_method_PANEL,
comp_method=_comp_method_PANEL,
bool_method=_arith_method_PANEL)
| artistic-2.0 |
abyssxsy/gnuradio | gr-filter/examples/interpolate.py | 58 | 8816 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = filter.firdes.low_pass_2(self._interp,
self._interp*self._fs,
freq2+50, 50,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = filter.firdes.low_pass_2(flt_size,
flt_size*self._fs,
freq2+50, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._interp
print "Taps per channel: ", tpc
# Create a couple of signals at different frequencies
self.signal1 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freq2, 0.5)
self.signal = blocks.add_cc()
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = filter.pfb.interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = filter.pfb.arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = blocks.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = blocks.vector_sink_c()
self.snk2 = blocks.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0/fs_int
Tmax = len(d)*Ts_int
t_o = scipy.arange(0, Tmax, Ts_int)
x_o1 = scipy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0/fs_aint
Tmax = len(d)*Ts_aint
t_o = scipy.arange(0, Tmax, Ts_aint)
x_o2 = scipy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
waddell/urbansim | urbansim/utils/tests/test_testing.py | 5 | 2190 |
import pandas as pd
import pytest
from .. import testing
def test_frames_equal_not_frames():
frame = pd.DataFrame({'a': [1]})
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(frame, 1)
assert info.value.message == 'Inputs must both be pandas DataFrames.'
def test_frames_equal_mismatched_columns():
expected = pd.DataFrame({'a': [1]})
actual = pd.DataFrame({'b': [2]})
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(actual, expected)
assert info.value.message == "Expected column 'a' not found."
def test_frames_equal_mismatched_rows():
expected = pd.DataFrame({'a': [1]}, index=[0])
actual = pd.DataFrame({'a': [1]}, index=[1])
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(actual, expected)
assert info.value.message == "Expected row 0 not found."
def test_frames_equal_mismatched_items():
expected = pd.DataFrame({'a': [1]})
actual = pd.DataFrame({'a': [2]})
with pytest.raises(AssertionError) as info:
testing.assert_frames_equal(actual, expected)
assert info.value.message == """
Items are not equal:
ACTUAL: 2
DESIRED: 1
Column: 'a'
Row: 0"""
def test_frames_equal():
frame = pd.DataFrame({'a': [1]})
testing.assert_frames_equal(frame, frame)
def test_frames_equal_close():
frame1 = pd.DataFrame({'a': [1]})
frame2 = pd.DataFrame({'a': [1.00000000000002]})
with pytest.raises(AssertionError):
testing.assert_frames_equal(frame1, frame2)
testing.assert_frames_equal(frame1, frame2, use_close=True)
def test_index_equal_order_agnostic():
left = pd.Index([1, 2, 3])
right = pd.Index([3, 2, 1])
testing.assert_index_equal(left, right)
def test_index_equal_order_agnostic_raises_left():
left = pd.Index([1, 2, 3, 4])
right = pd.Index([3, 2, 1])
with pytest.raises(AssertionError):
testing.assert_index_equal(left, right)
def test_index_equal_order_agnostic_raises_right():
left = pd.Index([1, 2, 3])
right = pd.Index([3, 2, 1, 4])
with pytest.raises(AssertionError):
testing.assert_index_equal(left, right)
| bsd-3-clause |
chatelak/RMG-Py | rmgpy/stats.py | 4 | 8698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2012 Prof. Richard H. West (r.west@neu.edu),
# Prof. William H. Green (whgreen@mit.edu)
# and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import os.path
import logging
try:
import xlwt
except ImportError:
logging.warning(
'Optional package dependency "xlwt" not loaded;\
Some output features will not work.'
)
import matplotlib.pyplot as plt
from rmgpy.util import makeOutputSubdirectory
class ExecutionStatsWriter(object):
"""
This class listens to an RMG subject
and writes an Excel file with the memory footprint
requirements through the course of an RMG simulation.
It also generates a number of images with information on the core/edge
species/reaction evolutions through the course of an RMG simulation.
Files are written to the 'plot' subfolder.
A new instance of the class can be attached to a subject as follows:
rmg = ...
listener = ExecutionStatsWriter()
rmg.attach(listener)
Whenever the subject calls the .notify() method, the
.update() method of the listener will be called.
To stop listening to the subject, the class can be detached
from its subject:
rmg.detach(listener)
"""
def __init__(self, outputDirectory):
super(ExecutionStatsWriter, self).__init__()
makeOutputSubdirectory(outputDirectory, 'plot')
# RMG execution statistics
self.coreSpeciesCount = []
self.coreReactionCount = []
self.edgeSpeciesCount = []
self.edgeReactionCount = []
self.restartSize = []
self.memoryUse = []
def update(self, rmg):
self.update_execution(rmg)
def update_execution(self, rmg):
# Update RMG execution statistics
logging.info('Updating RMG execution statistics...')
coreSpec, coreReac, edgeSpec, edgeReac = rmg.reactionModel.getModelSize()
self.coreSpeciesCount.append(coreSpec)
self.coreReactionCount.append(coreReac)
self.edgeSpeciesCount.append(edgeSpec)
self.edgeReactionCount.append(edgeReac)
elapsed = rmg.execTime[-1]
seconds = elapsed % 60
minutes = (elapsed - seconds) % 3600 / 60
hours = (elapsed - seconds - minutes * 60) % (3600 * 24) / 3600
days = (elapsed - seconds - minutes * 60 - hours * 3600) / (3600 * 24)
logging.info(' Execution time (DD:HH:MM:SS): '
'{0:02}:{1:02}:{2:02}:{3:02}'.format(int(days), int(hours), int(minutes), int(seconds)))
try:
import psutil
process = psutil.Process(os.getpid())
rss, vms = process.memory_info()
self.memoryUse.append(rss / 1.0e6)
logging.info(' Memory used: %.2f MB' % (self.memoryUse[-1]))
except:
logging.info(' Memory used: memory usage was unable to be logged')
self.memoryUse.append(0.0)
if os.path.exists(os.path.join(rmg.outputDirectory,'restart.pkl.gz')):
self.restartSize.append(os.path.getsize(os.path.join(rmg.outputDirectory,'restart.pkl.gz')) / 1.0e6)
logging.info(' Restart file size: %.2f MB' % (self.restartSize[-1]))
else:
self.restartSize.append(0.0)
self.saveExecutionStatistics(rmg)
if rmg.generatePlots:
self.generateExecutionPlots(rmg)
logging.info('')
def saveExecutionStatistics(self, rmg):
"""
Save the statistics of the RMG job to an Excel spreadsheet for easy viewing
after the run is complete. The statistics are saved to the file
`statistics.xls` in the output directory. The ``xlwt`` package is used to
create the spreadsheet file; if this package is not installed, no file is
saved.
"""
# Attempt to import the xlwt package; return if not installed
try:
xlwt
except NameError:
logging.warning('Package xlwt not loaded. Unable to save execution statistics.')
return
        # Create workbook and sheet in which the statistics will be placed
workbook = xlwt.Workbook()
sheet = workbook.add_sheet('Statistics')
# First column is execution time
sheet.write(0,0,'Execution time (s)')
for i, etime in enumerate(rmg.execTime):
sheet.write(i+1,0,etime)
# Second column is number of core species
sheet.write(0,1,'Core species')
for i, count in enumerate(self.coreSpeciesCount):
sheet.write(i+1,1,count)
# Third column is number of core reactions
sheet.write(0,2,'Core reactions')
for i, count in enumerate(self.coreReactionCount):
sheet.write(i+1,2,count)
# Fourth column is number of edge species
sheet.write(0,3,'Edge species')
for i, count in enumerate(self.edgeSpeciesCount):
sheet.write(i+1,3,count)
# Fifth column is number of edge reactions
sheet.write(0,4,'Edge reactions')
for i, count in enumerate(self.edgeReactionCount):
sheet.write(i+1,4,count)
# Sixth column is memory used
sheet.write(0,5,'Memory used (MB)')
for i, memory in enumerate(self.memoryUse):
sheet.write(i+1,5,memory)
# Seventh column is restart file size
sheet.write(0,6,'Restart file size (MB)')
for i, memory in enumerate(self.restartSize):
sheet.write(i+1,6,memory)
# Save workbook to file
fstr = os.path.join(rmg.outputDirectory, 'statistics.xls')
workbook.save(fstr)
def generateExecutionPlots(self, rmg):
"""
Generate a number of plots describing the statistics of the RMG job,
including the reaction model core and edge size and memory use versus
execution time. These will be placed in the output directory in the plot/
folder.
"""
logging.info('Generating plots of execution statistics...')
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(rmg.execTime, self.coreSpeciesCount, 'o-b')
ax1.set_xlabel('Execution time (s)')
ax1.set_ylabel('Number of core species')
ax2 = ax1.twinx()
ax2.semilogx(rmg.execTime, self.coreReactionCount, 'o-r')
ax2.set_ylabel('Number of core reactions')
plt.savefig(os.path.join(rmg.outputDirectory, 'plot/coreSize.svg'))
plt.clf()
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.loglog(rmg.execTime, self.edgeSpeciesCount, 'o-b')
ax1.set_xlabel('Execution time (s)')
ax1.set_ylabel('Number of edge species')
ax2 = ax1.twinx()
ax2.loglog(rmg.execTime, self.edgeReactionCount, 'o-r')
ax2.set_ylabel('Number of edge reactions')
plt.savefig(os.path.join(rmg.outputDirectory, 'plot/edgeSize.svg'))
plt.clf()
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(rmg.execTime, self.memoryUse, 'o-k')
ax1.semilogx(rmg.execTime, self.restartSize, 'o-g')
ax1.set_xlabel('Execution time (s)')
ax1.set_ylabel('Memory (MB)')
ax1.legend(['RAM', 'Restart file'], loc=2)
plt.savefig(os.path.join(rmg.outputDirectory, 'plot/memoryUse.svg'))
plt.clf() | mit |
danielhomola/mifs | mifs/mi.py | 1 | 5334 | """
Methods for calculating Mutual Information in an embarrassingly parallel way.
Author: Daniel Homola <dani.homola@gmail.com>
License: BSD 3 clause
"""
import numpy as np
from scipy.special import gamma, psi
from sklearn.neighbors import NearestNeighbors
from joblib import Parallel, delayed
def get_mi_vector(MI_FS, F, s):
"""
    Calculates the Mutual Information between each feature in F and s.
    This function is for when |S| > 1. s is the previously selected feature.
    We exploit the fact that this step is embarrassingly parallel.
"""
MIs = Parallel(n_jobs=MI_FS.n_jobs)(delayed(_get_mi)(f, s, MI_FS)
for f in F)
return MIs
def _get_mi(f, s, MI_FS):
n, p = MI_FS.X.shape
if MI_FS.method in ['JMI', 'JMIM']:
# JMI & JMIM
joint = MI_FS.X[:, (s, f)]
if MI_FS.categorical:
MI = _mi_dc(joint, MI_FS.y, MI_FS.k)
else:
vars = (joint, MI_FS.y)
MI = _mi_cc(vars, MI_FS.k)
else:
# MRMR
vars = (MI_FS.X[:, s].reshape(n, 1), MI_FS.X[:, f].reshape(n, 1))
MI = _mi_cc(vars, MI_FS.k)
# MI must be non-negative
if MI > 0:
return MI
else:
return np.nan
def get_first_mi_vector(MI_FS, k):
"""
    Calculates the Mutual Information between each feature in X and y.
    This function is for when |S| = 0. We select the first feature in S.
"""
n, p = MI_FS.X.shape
MIs = Parallel(n_jobs=MI_FS.n_jobs)(delayed(_get_first_mi)(i, k, MI_FS)
for i in range(p))
return MIs
def _get_first_mi(i, k, MI_FS):
n, p = MI_FS.X.shape
if MI_FS.categorical:
x = MI_FS.X[:, i].reshape((n, 1))
MI = _mi_dc(x, MI_FS.y, k)
else:
vars = (MI_FS.X[:, i].reshape((n, 1)), MI_FS.y)
MI = _mi_cc(vars, k)
# MI must be non-negative
if MI > 0:
return MI
else:
return np.nan
def _mi_dc(x, y, k):
"""
    Calculates the mutual information between a continuous vector x and a
    discrete class vector y.
    This implementation can calculate the MI between the joint distribution of
    one or more continuous variables (X[:, 1:3]) and a discrete variable (y).
Thanks to Adam Pocock, the author of the FEAST package for the idea.
Brian C. Ross, 2014, PLOS ONE
Mutual Information between Discrete and Continuous Data Sets
"""
y = y.flatten()
n = x.shape[0]
classes = np.unique(y)
knn = NearestNeighbors(n_neighbors=k)
# distance to kth in-class neighbour
d2k = np.empty(n)
# number of points within each point's class
Nx = []
for yi in y:
Nx.append(np.sum(y == yi))
# find the distance of the kth in-class point
for c in classes:
mask = np.where(y == c)[0]
knn.fit(x[mask, :])
d2k[mask] = knn.kneighbors()[0][:, -1]
# find the number of points within the distance of the kth in-class point
knn.fit(x)
m = knn.radius_neighbors(radius=d2k, return_distance=False)
m = [i.shape[0] for i in m]
# calculate MI based on Equation 2 in Ross 2014
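    #     I(X; Y) ~= psi(N) - <psi(N_x)> + psi(k) - <psi(m)>
    # where N is the total number of samples, N_x the number of samples
    # sharing each point's class label, k the neighbour count, and m the
    # number of samples (of any class) within each point's kth in-class
    # neighbour distance.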
MI = psi(n) - np.mean(psi(Nx)) + psi(k) - np.mean(psi(m))
return MI
def _mi_cc(variables, k=1):
"""
Returns the mutual information between any number of variables.
Here it is used to estimate MI between continuous X(s) and y.
Written by Gael Varoquaux:
https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429
"""
all_vars = np.hstack(variables)
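    # MI(X_1, ..., X_n) = sum_i H(X_i) - H(X_1, ..., X_n), with each entropy
    # term estimated by the kNN (Kozachenko-Leonenko) estimator below.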
return (sum([_entropy(X, k=k) for X in variables]) -
_entropy(all_vars, k=k))
def _nearest_distances(X, k=1):
"""
Returns the distance to the kth nearest neighbor for every point in X
"""
    knn = NearestNeighbors(n_neighbors=k + 1, metric='chebyshev')
knn.fit(X)
# the first nearest neighbor is itself
d, _ = knn.kneighbors(X)
# returns the distance to the kth nearest neighbor
return d[:, -1]
def _entropy(X, k=1):
"""
    Returns the entropy of X.
Written by Gael Varoquaux:
https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data the entropy of which is computed
k : int, optional
number of nearest neighbors for density estimation
References
----------
Kozachenko, L. F. & Leonenko, N. N. 1987 Sample estimate of entropy
of a random vector. Probl. Inf. Transm. 23, 95-101.
See also: Evans, D. 2008 A computationally efficient estimator for
mutual information, Proc. R. Soc. A 464 (2093), 1203-1215.
and:
Kraskov A, Stogbauer H, Grassberger P. (2004). Estimating mutual
information. Phys Rev E 69(6 Pt 2):066138.
F. Perez-Cruz, (2008). Estimation of Information Theoretic Measures
for Continuous Random Variables. Advances in Neural Information
Processing Systems 21 (NIPS). Vancouver (Canada), December.
return d*mean(log(r))+log(volume_unit_ball)+log(n-1)-log(k)
"""
# Distance to kth nearest neighbor
r = _nearest_distances(X, k)
n, d = X.shape
volume_unit_ball = (np.pi ** (.5 * d)) / gamma(.5 * d + 1)
return (d * np.mean(np.log(r + np.finfo(X.dtype).eps)) +
np.log(volume_unit_ball) + psi(n) - psi(k))
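

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; not part of the original module).
# The kNN entropy estimator above can be sanity-checked against the analytic
# entropy of a standard 1-D Gaussian, H = 0.5 * ln(2 * pi * e) ~= 1.419 nats.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.randn(5000, 1)
    analytic = 0.5 * np.log(2 * np.pi * np.e)
    estimated = _entropy(x, k=3)
    print('analytic entropy: %.3f nats, kNN estimate: %.3f nats'
          % (analytic, estimated))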
| bsd-3-clause |
q1ang/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
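# With `warm_start=True`, each call to `fit` with a larger `n_estimators`
# grows the existing forest rather than re-fitting it from scratch, so the
# OOB error can be recorded after every increment of trees added below.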
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
jaduimstra/nilmtk | nilmtk/metergroup.py | 2 | 70088 | from __future__ import print_function, division
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from datetime import timedelta
from warnings import warn
from sys import stdout
from collections import Counter
from copy import copy, deepcopy
import gc
from collections import namedtuple
# NILMTK imports
from .elecmeter import ElecMeter, ElecMeterID
from .appliance import Appliance
from .datastore.datastore import join_key
from .utils import (tree_root, nodes_adjacent_to_root, simplest_type_for,
flatten_2d_list, convert_to_timestamp, normalise_timestamp,
print_on_line, convert_to_list, append_or_extend_list,
most_common, capitalise_first_letter)
from .plots import plot_series
from .measurement import (select_best_ac_type, AC_TYPES, LEVEL_NAMES,
PHYSICAL_QUANTITIES_TO_AVERAGE)
from nilmtk.exceptions import MeasurementError
from .electric import Electric
from .timeframe import TimeFrame, split_timeframes
from .preprocessing import Apply
from .datastore import MAX_MEM_ALLOWANCE_IN_BYTES
from nilmtk.timeframegroup import TimeFrameGroup
# MeterGroupID.meters is a tuple of ElecMeterIDs. Order doesn't matter.
# (we can't use a set because sets aren't hashable so we can't use
# a set as a dict key or a DataFrame column name.)
MeterGroupID = namedtuple('MeterGroupID', ['meters'])
class MeterGroup(Electric):
"""A group of ElecMeter objects. Can contain nested MeterGroup objects.
Implements many of the same methods as ElecMeter.
Attributes
----------
meters : list of ElecMeters or nested MeterGroups
disabled_meters : list of ElecMeters or nested MeterGroups
name : only set by functions like 'groupby' and 'select_top_k'
"""
def __init__(self, meters=None, disabled_meters=None):
self.meters = convert_to_list(meters)
self.disabled_meters = convert_to_list(disabled_meters)
self.name = ""
def import_metadata(self, store, elec_meters, appliances, building_id):
"""
Parameters
----------
store : nilmtk.DataStore
elec_meters : dict of dicts
metadata for each ElecMeter
appliances : list of dicts
metadata for each Appliance
building_id : BuildingID
"""
# Sanity checking
assert isinstance(elec_meters, dict)
assert isinstance(appliances, list)
assert isinstance(building_id, tuple)
if not elec_meters:
warn("Building {} has an empty 'elec_meters' object."
.format(building_id.instance), RuntimeWarning)
if not appliances:
warn("Building {} has an empty 'appliances' list."
.format(building_id.instance), RuntimeWarning)
# Load static Meter Devices
ElecMeter.load_meter_devices(store)
# Load each meter
for meter_i, meter_metadata_dict in elec_meters.iteritems():
meter_id = ElecMeterID(instance=meter_i,
building=building_id.instance,
dataset=building_id.dataset)
meter = ElecMeter(store, meter_metadata_dict, meter_id)
self.meters.append(meter)
# Load each appliance
for appliance_md in appliances:
appliance_md['dataset'] = building_id.dataset
appliance_md['building'] = building_id.instance
appliance = Appliance(appliance_md)
meter_ids = [ElecMeterID(instance=meter_instance,
building=building_id.instance,
dataset=building_id.dataset)
for meter_instance in appliance.metadata['meters']]
if appliance.n_meters == 1:
# Attach this appliance to just a single meter
meter = self[meter_ids[0]]
if isinstance(meter, MeterGroup): # MeterGroup of site_meters
metergroup = meter
for meter in metergroup.meters:
meter.appliances.append(appliance)
else:
meter.appliances.append(appliance)
else:
# DualSupply or 3-phase appliance so need a meter group
metergroup = MeterGroup()
metergroup.meters = [self[meter_id] for meter_id in meter_ids]
for meter in metergroup.meters:
# We assume that any meters used for measuring
# dual-supply or 3-phase appliances are not also used
# for measuring single-supply appliances.
self.meters.remove(meter)
meter.appliances.append(appliance)
self.meters.append(metergroup)
# disable disabled meters
meters_to_disable = [m for m in self.meters
if isinstance(m, ElecMeter)
and m.metadata.get('disabled')]
for meter in meters_to_disable:
self.meters.remove(meter)
self.disabled_meters.append(meter)
def union(self, other):
"""
Returns
-------
new MeterGroup where its set of `meters` is the union of
`self.meters` and `other.meters`.
"""
if not isinstance(other, MeterGroup):
raise TypeError()
return MeterGroup(set(self.meters).union(other.meters))
def dominant_appliance(self):
dominant_appliances = [meter.dominant_appliance()
for meter in self.meters]
dominant_appliances = list(set(dominant_appliances))
n_dominant_appliances = len(dominant_appliances)
if n_dominant_appliances == 0:
return
elif n_dominant_appliances == 1:
return dominant_appliances[0]
else:
raise RuntimeError(
"More than one dominant appliance in MeterGroup!"
" (The dominant appliance per meter should be manually"
" specified in the metadata. If it isn't and if there are"
" multiple appliances for a meter then NILMTK assumes"
" all appliances on that meter are dominant. NILMTK"
" can't automatically distinguish between multiple"
" appliances on the same meter (at least,"
" not without using NILM!))")
def nested_metergroups(self):
return [m for m in self.meters if isinstance(m, MeterGroup)]
def __getitem__(self, key):
"""Get a single meter using appliance type and instance unless
ElecMeterID is supplied.
These formats for `key` are accepted:
Retrieve a meter using details of the meter:
* `1` - retrieves meter instance 1, raises Exception if there are
more than one meter with this instance, raises KeyError
if none are found. If meter instance 1 is in a nested MeterGroup
then retrieve the ElecMeter, not the MeterGroup.
* `ElecMeterID(1, 1, 'REDD')` - retrieves meter with specified meter ID
        * `MeterGroupID(meters=(ElecMeterID(1, 1, 'REDD'),))` - retrieves the
          existing nested MeterGroup containing exactly the specified meters.
* `[ElecMeterID(1, 1, 'REDD'), ElecMeterID(2, 1, 'REDD')]` - retrieves
existing nested MeterGroup containing exactly meter instances 1 and 2.
* `ElecMeterID(0, 1, 'REDD')` - instance `0` means `mains`. This returns
a new MeterGroup of all site_meters in building 1 in REDD.
* `ElecMeterID((1,2), 1, 'REDD')` - retrieve existing MeterGroup
which contains exactly meters 1 & 2.
* `(1, 2, 'REDD')` - converts to ElecMeterID and treats as an ElecMeterID.
Items must be in the order expected for an ElecMeterID.
Retrieve a meter using details of appliances attached to the meter:
* `'toaster'` - retrieves meter or group upstream of toaster instance 1
* `'toaster', 2` - retrieves meter or group upstream of toaster instance 2
* `{'dataset': 'redd', 'building': 3, 'type': 'toaster', 'instance': 2}`
- specify an appliance
Returns
-------
ElecMeter or MeterGroup
"""
if isinstance(key, str):
# default to get first meter
return self[(key, 1)]
elif isinstance(key, ElecMeterID):
if isinstance(key.instance, tuple):
# find meter group from a key of the form
# ElecMeterID(instance=(1,2), building=1, dataset='REDD')
for group in self.nested_metergroups():
if (set(group.instance()) == set(key.instance) and
group.building() == key.building and
group.dataset() == key.dataset):
return group
# Else try to find an ElecMeter with instance=(1,2)
for meter in self.meters:
if meter.identifier == key:
return meter
elif key.instance == 0:
metergroup_of_building = self.select(
building=key.building, dataset=key.dataset)
return metergroup_of_building.mains()
else:
for meter in self.meters:
if meter.identifier == key:
return meter
raise KeyError(key)
elif isinstance(key, MeterGroupID):
key_meters = set(key.meters)
for group in self.nested_metergroups():
if (set(group.identifier.meters) == key_meters):
return group
raise KeyError(key)
# find MeterGroup from list of ElecMeterIDs
elif isinstance(key, list):
if not all([isinstance(item, tuple) for item in key]):
raise TypeError("requires a list of ElecMeterID objects.")
for meter in self.meters: # TODO: write unit tests for this
# list of ElecMeterIDs. Return existing MeterGroup
if isinstance(meter, MeterGroup):
metergroup = meter
meter_ids = set(metergroup.identifier.meters)
if meter_ids == set(key):
return metergroup
raise KeyError(key)
elif isinstance(key, tuple):
if len(key) == 2:
if isinstance(key[0], str):
return self[{'type': key[0], 'instance': key[1]}]
else:
# Assume we're dealing with a request for 2 ElecMeters
return MeterGroup([self[i] for i in key])
elif len(key) == 3:
return self[ElecMeterID(*key)]
else:
raise TypeError()
elif isinstance(key, dict):
meters = []
for meter in self.meters:
if meter.matches_appliances(key):
meters.append(meter)
if len(meters) == 1:
return meters[0]
elif len(meters) > 1:
raise Exception('search terms match {} appliances'
.format(len(meters)))
else:
raise KeyError(key)
elif isinstance(key, int) and not isinstance(key, bool):
meters_found = []
for meter in self.meters:
if isinstance(meter.instance(), int):
if meter.instance() == key:
meters_found.append(meter)
elif isinstance(meter.instance(), (tuple, list)):
if key in meter.instance():
if isinstance(meter, MeterGroup):
print("Meter", key, "is in a nested meter group."
" Retrieving just the ElecMeter.")
meters_found.append(meter[key])
else:
meters_found.append(meter)
n_meters_found = len(meters_found)
if n_meters_found > 1:
raise Exception('{} meters found with instance == {}: {}'
.format(n_meters_found, key, meters_found))
elif n_meters_found == 0:
raise KeyError(
'No meters found with instance == {}'.format(key))
else:
return meters_found[0]
else:
raise TypeError()
def matches(self, key):
for meter in self.meters:
if meter.matches(key):
return True
return False
def select(self, **kwargs):
"""Select a group of meters based on meter metadata.
e.g.
* select(building=1, sample_period=6)
* select(room='bathroom')
If multiple criteria are supplied then these are ANDed together.
Returns
-------
new MeterGroup of selected meters.
Ideas for the future (not implemented yet!)
-------------------------------------------
* select(category=['ict', 'lighting'])
* select([(fridge, 1), (tv, 1)]) # get specifically fridge 1 and tv 1
* select(name=['fridge', 'tv']) # get all fridges and tvs
* select(category='lighting', except={'room'=['kitchen lights']})
* select('all', except=[('tv', 1)])
Also: see if we can do select(category='lighting' | name='tree lights')
or select(energy > 100)?? Perhaps using:
* Python's eval function something like this:
>>> s = pd.Series(np.random.randn(5))
>>> eval('(x > 0) | (index > 2)', {'x':s, 'index':s.index})
Hmm, yes, maybe we should just implement this! e.g.
select("(category == 'lighting') | (category == 'ict')")
But what about:
* select('total_energy > 100')
* select('mean(hours_on_per_day) > 3')
* select('max(hours_on_per_day) > 5')
* select('max(power) > 2000')
* select('energy_per_day > 2')
* select('rank_by_energy > 5') # top_k(5)
* select('rank_by_proportion > 0.2')
Maybe don't bother. That's easy enough
to get with itemised_energy(). Although these are quite nice
and shouldn't be too hard. Would need to only calculate
these stats if necessary though (e.g. by checking if 'total_energy'
is in the query string before running `eval`)
* or numexpr: https://github.com/pydata/numexpr
* see Pandas.eval():
* http://pandas.pydata.org/pandas-docs/stable/indexing.html#the-query-method-experimental
* https://github.com/pydata/pandas/blob/master/pandas/computation/eval.py#L119
"""
selected_meters = []
exception_raised_every_time = True
exception = None
func = kwargs.pop('func', 'matches')
for meter in self.meters:
try:
match = getattr(meter, func)(kwargs)
except KeyError as e:
exception = e
else:
exception_raised_every_time = False
if match:
selected_meters.append(meter)
if exception_raised_every_time and exception is not None:
raise exception
return MeterGroup(selected_meters)
def select_using_appliances(self, **kwargs):
"""Select a group of meters based on appliance metadata.
e.g.
* select(category='lighting')
* select(type='fridge')
* select(building=1, category='lighting')
* select(room='bathroom')
If multiple criteria are supplied then these are ANDed together.
Returns
-------
new MeterGroup of selected meters.
"""
return self.select(func='matches_appliances', **kwargs)
def from_list(self, meter_ids):
"""
Parameters
----------
meter_ids : list or tuple
Each element is an ElecMeterID or a MeterGroupID.
Returns
-------
MeterGroup
"""
meter_ids = list(meter_ids)
meter_ids = list(set(meter_ids)) # make unique
meters = []
def append_meter_group(meter_id):
try:
# see if there is an existing MeterGroup
metergroup = self[meter_id]
except KeyError:
# there is no existing MeterGroup so assemble one
metergroup = self.from_list(meter_id.meters)
meters.append(metergroup)
for meter_id in meter_ids:
if isinstance(meter_id, ElecMeterID):
meters.append(self[meter_id])
elif isinstance(meter_id, MeterGroupID):
append_meter_group(meter_id)
elif isinstance(meter_id, tuple):
meter_id = MeterGroupID(meters=meter_id)
append_meter_group(meter_id)
else:
raise TypeError()
return MeterGroup(meters)
@classmethod
def from_other_metergroup(cls, other, dataset):
"""Assemble a new meter group using the same meter IDs and nested
MeterGroups as `other`. This is useful for preparing a ground truth
metergroup from a meter group of NILM predictions.
Parameters
----------
other : MeterGroup
dataset : string
The `name` of the dataset for the ground truth. e.g. 'REDD'
Returns
-------
MeterGroup
"""
other_identifiers = other.identifier.meters
new_identifiers = []
for other_id in other_identifiers:
new_id = other_id._replace(dataset=dataset)
if isinstance(new_id.instance, tuple):
nested = []
for instance in new_id.instance:
new_nested_id = new_id._replace(instance=instance)
nested.append(new_nested_id)
new_identifiers.append(tuple(nested))
else:
new_identifiers.append(new_id)
metergroup = MeterGroup()
metergroup.from_list(new_identifiers)
return metergroup
def __eq__(self, other):
if isinstance(other, MeterGroup):
return set(other.meters) == set(self.meters)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def appliances(self):
appliances = set()
for meter in self.meters:
appliances.update(meter.appliances)
return list(appliances)
def dominant_appliances(self):
appliances = set()
for meter in self.meters:
appliances.add(meter.dominant_appliance())
return list(appliances)
def values_for_appliance_metadata_key(self, key,
only_consider_dominant_appliance=True):
"""
Parameters
----------
key : str
e.g. 'type' or 'categories' or 'room'
Returns
-------
list
"""
values = []
if only_consider_dominant_appliance:
appliances = self.dominant_appliances()
else:
appliances = self.appliances
for appliance in appliances:
value = appliance.metadata.get(key)
append_or_extend_list(values, value)
value = appliance.type.get(key)
append_or_extend_list(values, value)
return list(set(values))
def get_labels(self, meter_ids, pretty=True):
"""Create human-readable meter labels.
Parameters
----------
meter_ids : list of ElecMeterIDs (or 3-tuples in same order as ElecMeterID)
Returns
-------
list of strings describing the appliances.
"""
meters = [self[meter_id] for meter_id in meter_ids]
labels = [meter.label(pretty=pretty) for meter in meters]
return labels
def __repr__(self):
s = "{:s}(meters=\n".format(self.__class__.__name__)
for meter in self.meters:
s += " " + str(meter).replace("\n", "\n ") + "\n"
s += ")"
return s
@property
def identifier(self):
"""Returns a MeterGroupID."""
return MeterGroupID(meters=tuple([meter.identifier for meter in self.meters]))
def instance(self):
"""Returns tuple of integers where each int is a meter instance."""
return tuple([meter.instance() for meter in self.meters])
def building(self):
"""Returns building instance integer(s)."""
buildings = set([meter.building() for meter in self.meters])
return simplest_type_for(buildings)
def contains_meters_from_multiple_buildings(self):
"""Returns True if this MeterGroup contains meters from
more than one building."""
building = self.building()
try:
n = len(building)
except TypeError:
return False
else:
return n > 1
def dataset(self):
"""Returns dataset string(s)."""
datasets = set([meter.dataset() for meter in self.meters])
return simplest_type_for(datasets)
def sample_period(self):
"""Returns max of all meter sample periods."""
return max([meter.sample_period() for meter in self.meters])
def wiring_graph(self):
"""Returns a networkx.DiGraph of connections between meters."""
wiring_graph = nx.DiGraph()
def _build_wiring_graph(meters):
for meter in meters:
if isinstance(meter, MeterGroup):
metergroup = meter
_build_wiring_graph(metergroup.meters)
else:
upstream_meter = meter.upstream_meter(raise_warning=False)
# Need to ensure we use the same object
# if upstream meter already exists.
if upstream_meter is not None:
for node in wiring_graph.nodes():
if upstream_meter == node:
upstream_meter = node
break
wiring_graph.add_edge(upstream_meter, meter)
_build_wiring_graph(self.meters)
return wiring_graph
def draw_wiring_graph(self, show_meter_labels=True):
graph = self.wiring_graph()
meter_labels = {meter: meter.instance() for meter in graph.nodes()}
pos = nx.graphviz_layout(graph, prog='dot')
nx.draw(graph, pos, labels=meter_labels, arrows=False)
if show_meter_labels:
meter_labels = {meter: meter.label() for meter in graph.nodes()}
for meter, name in meter_labels.iteritems():
x, y = pos[meter]
if meter.is_site_meter():
delta_y = 5
else:
delta_y = -5
plt.text(x, y+delta_y, s=name, bbox=dict(facecolor='red', alpha=0.5), horizontalalignment='center')
ax = plt.gca()
return graph, ax
def load(self, **kwargs):
"""Returns a generator of DataFrames loaded from the DataStore.
By default, `load` will load all available columns from the DataStore.
Specific columns can be selected in one or two mutually exclusive ways:
1. specify a list of column names using the `cols` parameter.
2. specify a `physical_quantity` and/or an `ac_type` parameter to ask
`load` to automatically select columns.
Each meter in the MeterGroup will first be resampled before being added.
The returned DataFrame will include NaNs at timestamps where no meter
had a sample (after resampling the meter).
Parameters
----------
sample_period : int or float, optional
Number of seconds to use as sample period when reindexing meters.
If not specified then will use the max of all meters' sample_periods.
        resample_kwargs : dict of key word arguments (other than 'rule') to
            pass to `pd.DataFrame.resample()`
chunksize : int, optional
the maximum number of rows per chunk. Note that each chunk is
guaranteed to be of length <= chunksize. Each chunk is *not*
guaranteed to be exactly of length == chunksize.
**kwargs :
any other key word arguments to pass to `self.store.load()` including:
physical_quantity : string or list of strings
e.g. 'power' or 'voltage' or 'energy' or ['power', 'energy'].
If a single string then load columns only for that physical quantity.
If a list of strings then load columns for all those physical
quantities.
ac_type : string or list of strings, defaults to None
Where 'ac_type' is short for 'alternating current type'. e.g.
'reactive' or 'active' or 'apparent'.
If set to None then will load all AC types per physical quantity.
If set to 'best' then load the single best AC type per
physical quantity.
If set to a single AC type then load just that single AC type per
physical quantity, else raise an Exception.
If set to a list of AC type strings then will load all those
AC types and will raise an Exception if any cannot be found.
cols : list of tuples, using NILMTK's vocabulary for measurements.
e.g. [('power', 'active'), ('voltage', ''), ('energy', 'reactive')]
`cols` can't be used if `ac_type` and/or `physical_quantity` are set.
preprocessing : list of Node subclass instances
e.g. [Clip()]
Returns
---------
Always return a generator of DataFrames (even if it only has a single
column).
.. note:: Different AC types will be treated separately.
"""
# Handle kwargs
sample_period = kwargs.setdefault('sample_period', self.sample_period())
sections = kwargs.pop('sections', [self.get_timeframe()])
chunksize = kwargs.pop('chunksize', MAX_MEM_ALLOWANCE_IN_BYTES)
duration_threshold = sample_period * chunksize
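        # Sections longer than this are split below so that, at the requested
        # sample period, no single loaded chunk should exceed ~`chunksize` rows.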
columns = pd.MultiIndex.from_tuples(
self._convert_physical_quantity_and_ac_type_to_cols(**kwargs)['cols'],
names=LEVEL_NAMES)
freq = '{:d}S'.format(int(sample_period))
verbose = kwargs.get('verbose')
# Check for empty sections
sections = [section for section in sections if section]
if not sections:
print("No sections to load.")
yield pd.DataFrame(columns=columns)
return
# Loop through each section to load
for section in split_timeframes(sections, duration_threshold):
kwargs['sections'] = [section]
start = normalise_timestamp(section.start, freq)
tz = None if start.tz is None else start.tz.zone
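            # Build a regular (fixed-frequency) index spanning this section at
            # the target sample period; the meters' chunks are aligned onto it.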
index = pd.date_range(
start.tz_localize(None), section.end.tz_localize(None), tz=tz,
closed='left', freq=freq)
chunk = combine_chunks_from_generators(
index, columns, self.meters, kwargs)
yield chunk
def _convert_physical_quantity_and_ac_type_to_cols(self, **kwargs):
all_columns = set()
kwargs = deepcopy(kwargs)
for meter in self.meters:
kwargs_copy = deepcopy(kwargs)
new_kwargs = meter._convert_physical_quantity_and_ac_type_to_cols(**kwargs_copy)
cols = new_kwargs.get('cols', [])
for col in cols:
all_columns.add(col)
kwargs['cols'] = list(all_columns)
return kwargs
def _meter_generators(self, **kwargs):
"""Returns (list of identifiers, list of generators)."""
generators = []
identifiers = []
for meter in self.meters:
kwargs_copy = deepcopy(kwargs)
generator = meter.load(**kwargs_copy)
generators.append(generator)
identifiers.append(meter.identifier)
return identifiers, generators
def simultaneous_switches(self, threshold=40):
"""
Parameters
----------
threshold : number, threshold in Watts
Returns
-------
sim_switches : pd.Series of type {timestamp: number of
simultaneous switches}
Notes
-----
This function assumes that the submeters in this MeterGroup
are all aligned. If they are not then you should align the
meters, e.g. by using an `Apply` node with `resample`.
"""
submeters = self.submeters().meters
count = Counter()
for meter in submeters:
switch_time_meter = meter.switch_times(threshold)
for timestamp in switch_time_meter:
count[timestamp] += 1
sim_switches = pd.Series(count)
# Should be 2 or more appliances changing state at the same time
sim_switches = sim_switches[sim_switches >= 2]
return sim_switches
def mains(self):
"""
Returns
-------
ElecMeter or MeterGroup or None
"""
if self.contains_meters_from_multiple_buildings():
msg = ("This MeterGroup contains meters from buildings '{}'."
" It only makes sense to get `mains` if the MeterGroup"
" contains meters from a single building."
.format(self.building()))
raise RuntimeError(msg)
site_meters = [meter for meter in self.meters if meter.is_site_meter()]
n_site_meters = len(site_meters)
if n_site_meters == 0:
return
elif n_site_meters == 1:
return site_meters[0]
else:
return MeterGroup(meters=site_meters)
def use_alternative_mains(self):
"""Swap present mains meter(s) for mains meter(s) in `disabled_meters`.
This is useful if the dataset has multiple, redundant mains meters
(e.g. in UK-DALE buildings 1, 2 and 5).
"""
present_mains = [m for m in self.meters if m.is_site_meter()]
alternative_mains = [m for m in self.disabled_meters if m.is_site_meter()]
if not alternative_mains:
raise RuntimeError("No site meters found in `self.disabled_meters`")
for meter in present_mains:
self.meters.remove(meter)
self.disabled_meters.append(meter)
for meter in alternative_mains:
self.meters.append(meter)
self.disabled_meters.remove(meter)
def upstream_meter(self):
"""Returns single upstream meter.
Raises RuntimeError if more than 1 upstream meter.
"""
upstream_meters = []
for meter in self.meters:
upstream_meters.append(meter.upstream_meter())
unique_upstream_meters = list(set(upstream_meters))
if len(unique_upstream_meters) > 1:
raise RuntimeError("{:d} upstream meters found for meter group."
" Should be 1.".format(len(unique_upstream_meters)))
return unique_upstream_meters[0]
def meters_directly_downstream_of_mains(self):
"""Returns new MeterGroup."""
meters = nodes_adjacent_to_root(self.wiring_graph())
assert isinstance(meters, list)
return MeterGroup(meters)
def submeters(self):
"""Returns new MeterGroup of all meters except site_meters"""
submeters = [meter for meter in self.meters
if not meter.is_site_meter()]
return MeterGroup(submeters)
def is_site_meter(self):
"""Returns True if any meters are site meters"""
return any([meter.is_site_meter() for meter in self.meters])
def total_energy(self, **load_kwargs):
"""Sums together total meter_energy for each meter.
Note that this function does *not* return the total aggregate
energy for a building. Instead this function adds up the total energy
for all the meters contained in this MeterGroup. If you want the total
aggregate energy then please use `MeterGroup.mains().total_energy()`.
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
if `full_results` is True then return TotalEnergyResults object
else return a pd.Series with a row for each AC type.
"""
self._check_kwargs_for_full_results_and_sections(load_kwargs)
full_results = load_kwargs.pop('full_results', False)
meter_energies = self._collect_stats_on_all_meters(
load_kwargs, 'total_energy', full_results)
if meter_energies:
total_energy_results = meter_energies[0]
for meter_energy in meter_energies[1:]:
if full_results:
total_energy_results.unify(meter_energy)
else:
total_energy_results += meter_energy
return total_energy_results
def _collect_stats_on_all_meters(self, load_kwargs, func, full_results):
collected_stats = []
for meter in self.meters:
print_on_line("\rCalculating", func, "for", meter.identifier, "... ")
single_stat = getattr(meter, func)(full_results=full_results,
**load_kwargs)
collected_stats.append(single_stat)
if (full_results and len(self.meters) > 1 and
not meter.store.all_sections_smaller_than_chunksize):
warn("at least one section requested from '{}' required"
" multiple chunks to be loaded into memory. This may cause"
" a failure when we try to unify results from multiple"
" meters.".format(meter))
return collected_stats
def dropout_rate(self, **load_kwargs):
"""Sums together total energy for each meter.
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
        if `full_results` is True then return a DropoutRateResults object,
        else return the mean dropout rate across all meters as a float.
"""
self._check_kwargs_for_full_results_and_sections(load_kwargs)
full_results = load_kwargs.pop('full_results', False)
dropout_rates = self._collect_stats_on_all_meters(
load_kwargs, 'dropout_rate', full_results)
if full_results and dropout_rates:
dropout_rate_results = dropout_rates[0]
for dr in dropout_rates[1:]:
dropout_rate_results.unify(dr)
return dropout_rate_results
else:
return np.mean(dropout_rates)
def _check_kwargs_for_full_results_and_sections(self, load_kwargs):
if (load_kwargs.get('full_results')
and 'sections' not in load_kwargs
and len(self.meters) > 1):
raise RuntimeError("MeterGroup stats can only return full results"
" objects if you specify 'sections' to load. If"
" you do not specify periods then the results"
" from individual meters are likely to be for"
" different periods and hence"
" cannot be unified.")
def good_sections(self, **kwargs):
"""Returns good sections for just the first meter.
TODO: combine good sections from every meter.
"""
if self.meters:
if len(self.meters) > 1:
warn("As a quick implementation we only get Good Sections from"
" the first meter in the meter group. We should really"
" return the intersection of the good sections for all"
" meters. This will be fixed...")
return self.meters[0].good_sections(**kwargs)
else:
return []
def dataframe_of_meters(self, **kwargs):
"""
Parameters
----------
sample_period : int or float, optional
Number of seconds to use as sample period when reindexing meters.
If not specified then will use the max of all meters' sample_periods.
resample : bool, defaults to True
If True then resample to `sample_period`.
**kwargs :
any other key word arguments to pass to `self.store.load()` including:
ac_type : string, defaults to 'best'
physical_quantity: string, defaults to 'power'
Returns
-------
DataFrame
Each column is a meter.
"""
kwargs.setdefault('sample_period', self.sample_period())
kwargs.setdefault('ac_type', 'best')
kwargs.setdefault('physical_quantity', 'power')
identifiers, generators = self._meter_generators(**kwargs)
segments = []
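        # Pull one chunk from every meter per iteration and join them
        # column-wise; the loop ends once no meter yields a non-empty chunk.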
while True:
chunks = []
ids = []
for meter_id, generator in zip(identifiers, generators):
try:
chunk_from_next_meter = next(generator)
except StopIteration:
continue
if not chunk_from_next_meter.empty:
ids.append(meter_id)
chunks.append(chunk_from_next_meter.sum(axis=1))
if chunks:
df = pd.concat(chunks, axis=1)
df.columns = ids
segments.append(df)
else:
break
if segments:
return pd.concat(segments)
else:
return pd.DataFrame(columns=self.identifier.meters)
def entropy_per_meter(self):
"""Finds the entropy of each meter in this MeterGroup.
Returns
-------
pd.Series of entropy
"""
return self.call_method_on_all_meters('entropy')
def call_method_on_all_meters(self, method):
"""Calls `method` on each element in `self.meters`.
Parameters
----------
method : str
Name of a stats method in `ElecMeter`. e.g. 'correlation'.
Returns
-------
pd.Series of result of `method` called on each element in `self.meters`.
"""
meter_identifiers = list(self.identifier.meters)
result = pd.Series(index=meter_identifiers)
for meter in self.meters:
id_meter = meter.identifier
result[id_meter] = getattr(meter, method)()
return result
def pairwise(self, method):
"""
Calls `method` on all pairs in `self.meters`.
Assumes `method` is symmetrical.
Parameters
----------
method : str
Name of a stats method in `ElecMeter`. e.g. 'correlation'.
Returns
-------
pd.DataFrame of the result of `method` called on each
pair in `self.meters`.
"""
meter_identifiers = list(self.identifier.meters)
result = pd.DataFrame(index=meter_identifiers, columns=meter_identifiers)
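        # `method` is assumed symmetric, so only the upper triangle is
        # computed; the lower triangle is mirrored from it.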
for i, m_i in enumerate(self.meters):
for j, m_j in enumerate(self.meters):
id_i = m_i.identifier
id_j = m_j.identifier
if i > j:
result[id_i][id_j] = result[id_j][id_i]
else:
result[id_i][id_j] = getattr(m_i, method)(m_j)
return result
def pairwise_mutual_information(self):
"""
Finds the pairwise mutual information among different
meters in a MeterGroup.
Returns
-------
pd.DataFrame of mutual information between
pair of ElecMeters.
"""
return self.pairwise('mutual_information')
def pairwise_correlation(self):
"""
Finds the pairwise correlation among different
meters in a MeterGroup.
Returns
-------
pd.DataFrame of correlation between pair of ElecMeters.
"""
return self.pairwise('correlation')
def proportion_of_energy_submetered(self, **loader_kwargs):
"""
Returns
-------
float [0,1] or NaN if mains total_energy == 0
"""
print("Running MeterGroup.proportion_of_energy_submetered...")
mains = self.mains()
downstream_meters = self.meters_directly_downstream_of_mains()
proportion = 0.0
verbose = loader_kwargs.get('verbose')
all_nan = True
for m in downstream_meters.meters:
if verbose:
print("Calculating proportion for", m)
prop = m.proportion_of_energy(mains, **loader_kwargs)
if not np.isnan(prop):
proportion += prop
all_nan = False
if verbose:
print(" {:.2%}".format(prop))
if all_nan:
proportion = np.NaN
return proportion
def available_ac_types(self, physical_quantity):
"""Returns set of all available alternating current types for a
specific physical quantity.
Parameters
----------
physical_quantity : str or list of strings
Returns
-------
list of strings e.g. ['apparent', 'active']
"""
all_ac_types = [meter.available_ac_types(physical_quantity)
for meter in self.meters]
return list(set(flatten_2d_list(all_ac_types)))
def available_physical_quantities(self):
"""
Returns
-------
list of strings e.g. ['power', 'energy']
"""
all_physical_quants = [meter.available_physical_quantities()
for meter in self.meters]
return list(set(flatten_2d_list(all_physical_quants)))
def energy_per_meter(self, per_period=None, mains=None,
use_meter_labels=False, **load_kwargs):
"""Returns pd.DataFrame where columns is meter.identifier and
each value is total energy. Index is AC types.
Does not care about wiring hierarchy. Does not attempt to ensure all
channels share the same time sections.
Parameters
----------
per_period : None or offset alias
If None then returns absolute energy used per meter.
If a Pandas offset alias (e.g. 'D' for 'daily') then
will return the average energy per period.
ac_type : None or str
e.g. 'active' or 'best'. Defaults to 'best'.
use_meter_labels : bool
If True then columns will be human-friendly meter labels.
If False then columns will be ElecMeterIDs or MeterGroupIDs
mains : None or MeterGroup or ElecMeter
If None then will return DataFrame without remainder.
If not None then will return a Series including a 'remainder'
row which will be `mains.total_energy() - energy_per_meter.sum()`
and an attempt will be made to use the correct AC_TYPE.
Returns
-------
pd.DataFrame if mains is None else a pd.Series
"""
meter_identifiers = list(self.identifier.meters)
energy_per_meter = pd.DataFrame(columns=meter_identifiers, index=AC_TYPES)
n_meters = len(self.meters)
load_kwargs.setdefault('ac_type', 'best')
for i, meter in enumerate(self.meters):
print('\r{:d}/{:d} {}'.format(i+1, n_meters, meter), end='')
stdout.flush()
if per_period is None:
meter_energy = meter.total_energy(**load_kwargs)
else:
load_kwargs.setdefault('use_uptime', False)
meter_energy = meter.average_energy_per_period(
offset_alias=per_period, **load_kwargs)
energy_per_meter[meter.identifier] = meter_energy
        energy_per_meter = energy_per_meter.dropna(how='all')
if use_meter_labels:
energy_per_meter.columns = self.get_labels(energy_per_meter.columns)
if mains is not None:
energy_per_meter = self._energy_per_meter_with_remainder(
energy_per_meter, mains, per_period, **load_kwargs)
return energy_per_meter
def _energy_per_meter_with_remainder(self, energy_per_meter,
mains, per_period, **kwargs):
ac_types = energy_per_meter.keys()
energy_per_meter = energy_per_meter.sum() # Collapse AC_TYPEs into Series
# Find most common ac_type in energy_per_meter:
most_common_ac_type = most_common(ac_types)
mains_ac_types = mains.available_ac_types(
['power', 'energy', 'cumulative energy'])
if most_common_ac_type in mains_ac_types:
mains_ac_type = most_common_ac_type
else:
mains_ac_type = 'best'
# Get mains energy_per_meter
kwargs['ac_type'] = mains_ac_type
if per_period is None:
mains_energy = mains.total_energy(**kwargs)
else:
mains_energy = mains.average_energy_per_period(
offset_alias=per_period, **kwargs)
mains_energy = mains_energy[mains_energy.keys()[0]]
# Calculate remainder
energy_per_meter['Remainder'] = mains_energy - energy_per_meter.sum()
energy_per_meter.sort(ascending=False)
return energy_per_meter
def fraction_per_meter(self, **load_kwargs):
"""Fraction of energy per meter.
Return pd.Series. Index is meter.instance.
Each value is a float in the range [0,1].
"""
energy_per_meter = self.energy_per_meter(**load_kwargs).max()
total_energy = energy_per_meter.sum()
return energy_per_meter / total_energy
def proportion_of_upstream_total_per_meter(self, **load_kwargs):
prop_per_meter = pd.Series(index=self.identifier.meters)
n_meters = len(self.meters)
for i, meter in enumerate(self.meters):
proportion = meter.proportion_of_upstream(**load_kwargs)
print('\r{:d}/{:d} {} = {:.3f}'
.format(i+1, n_meters, meter, proportion), end='')
stdout.flush()
prop_per_meter[meter.identifier] = proportion
prop_per_meter.sort(ascending=False)
return prop_per_meter
def train_test_split(self, train_fraction=0.5):
"""
Parameters
----------
train_fraction
Returns
-------
split_time: pd.Timestamp where split should happen
"""
assert(
0 < train_fraction < 1), "`train_fraction` should be between 0 and 1"
# TODO: currently just works with the first mains meter, assuming
        # both to be simultaneously sampled
mains = self.mains()
        good_sections = mains.good_sections()
sample_period = mains.device['sample_period']
appx_num_records_in_each_good_section = [
int((ts.end - ts.start).total_seconds() / sample_period) for ts in good_sections]
appx_total_records = sum(appx_num_records_in_each_good_section)
records_in_train = appx_total_records * train_fraction
seconds_in_train = int(records_in_train * sample_period)
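        # Walk through the good sections accumulating records until the
        # requested training fraction is reached; the timestamp reached at
        # that point is returned as the train/test split.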
if len(good_sections) == 1:
# all data is contained in one good section
split_point = good_sections[
0].start + timedelta(seconds=seconds_in_train)
return split_point
else:
# data is split across multiple time deltas
records_remaining = records_in_train
while records_remaining:
for i, records_in_section in enumerate(appx_num_records_in_each_good_section):
if records_remaining > records_in_section:
records_remaining -= records_in_section
elif records_remaining == records_in_section:
# Next TimeFrame is the split point!!
split_point = good_sections[i + 1].start
return split_point
else:
# Need to split this timeframe
split_point = good_sections[
i].start + timedelta(seconds=sample_period * records_remaining)
return split_point
################## FUNCTIONS NOT YET IMPLEMENTED ###################
# def init_new_dataset(self):
# self.infer_and_set_meter_connections()
# self.infer_and_set_dual_supply_appliances()
# def infer_and_set_meter_connections(self):
# """
# Arguments
# ---------
# meters : list of Meter objects
# """
# Maybe this should be a stand-alone function which
# takes a list of meters???
# raise NotImplementedError
# def infer_and_set_dual_supply_appliances(self):
# raise NotImplementedError
# def total_on_duration(self):
# """Return timedelta"""
# raise NotImplementedError
# def on_durations(self):
# self.get_unique_upstream_meters()
# for each meter, get the on time,
# assuming the on-power-threshold for the
# smallest appliance connected to that meter???
# raise NotImplementedError
# def activity_distribution(self, bin_size, timespan):
# raise NotImplementedError
# def on_off_events(self, minimum_state_duration):
# raise NotImplementedError
def select_top_k(self, k=5, by="energy", asc=False, group_remainder=False, **kwargs):
"""Only select the top K meters, according to energy.
Functions on the entire MeterGroup. So if you mean to select
the top K from only the submeters, please do something like
this:
elec.submeters().select_top_k()
Parameters
----------
k : int, optional, defaults to 5
by: string, optional, defaults to energy
Can select top k by:
* energy
* entropy
asc: bool, optional, defaults to False
By default top_k is in descending order. To select top_k
by ascending order, use asc=True
group_remainder : bool, optional, defaults to False
If True then place all remaining meters into a
nested metergroup.
**kwargs : key word arguments to pass to load()
Returns
-------
MeterGroup
"""
function_map = {'energy': self.fraction_per_meter, 'entropy': self.entropy_per_meter}
top_k_series = function_map[by](**kwargs)
top_k_series.sort(ascending=asc)
top_k_elec_meter_ids = top_k_series[:k].index
top_k_metergroup = self.from_list(top_k_elec_meter_ids)
if group_remainder:
remainder_ids = top_k_series[k:].index
remainder_metergroup = self.from_list(remainder_ids)
remainder_metergroup.name = 'others'
top_k_metergroup.meters.append(remainder_metergroup)
return top_k_metergroup
def groupby(self, key, use_appliance_metadata=True, **kwargs):
"""
e.g. groupby('category')
Returns
-------
MeterGroup of nested MeterGroups: one per group
"""
if not use_appliance_metadata:
raise NotImplementedError()
values = self.values_for_appliance_metadata_key(key)
groups = []
for value in values:
group = self.select_using_appliances(**{key: value})
group.name = value
groups.append(group)
return MeterGroup(groups)
def get_timeframe(self):
"""
Returns
-------
nilmtk.TimeFrame representing the timeframe which is the union
of all meters in self.meters.
"""
timeframe = None
for meter in self.meters:
if timeframe is None:
timeframe = meter.get_timeframe()
elif meter.get_timeframe().empty:
pass
else:
timeframe = timeframe.union(meter.get_timeframe())
return timeframe
def plot(self, kind='separate lines', **kwargs):
"""
Parameters
----------
width : int, optional
Number of points on the x axis required
ax : matplotlib.axes, optional
plot_legend : boolean, optional
Defaults to True. Set to False to not plot legend.
        kind : {'separate lines', 'sum', 'area', 'sankey', 'energy bar'}
timeframe : nilmtk.TimeFrame, optional
Defaults to self.get_timeframe()
"""
# Load data and plot each meter
function_map = {
'separate lines': self._plot_separate_lines,
'sum': super(MeterGroup, self).plot,
'area': self._plot_area,
'sankey': self._plot_sankey,
'energy bar': self._plot_energy_bar
}
try:
ax = function_map[kind](**kwargs)
except KeyError:
raise ValueError("'{}' not a valid setting for 'kind' parameter."
.format(kind))
return ax
def _plot_separate_lines(self, ax=None, plot_legend=True, **kwargs):
for meter in self.meters:
if isinstance(meter, MeterGroup):
ax = meter.plot(ax=ax, plot_legend=False, kind='sum', **kwargs)
else:
ax = meter.plot(ax=ax, plot_legend=False, **kwargs)
if plot_legend:
plt.legend()
return ax
def _plot_sankey(self):
graph = self.wiring_graph()
meter_labels = {meter: meter.instance() for meter in graph.nodes()}
pos = nx.graphviz_layout(graph, prog='dot')
#nx.draw(graph, pos, labels=meter_labels, arrows=False)
meter_labels = {meter: meter.label() for meter in graph.nodes()}
for meter, name in meter_labels.iteritems():
x, y = pos[meter]
if meter.is_site_meter():
delta_y = 5
else:
delta_y = -5
plt.text(x, y+delta_y, s=name, bbox=dict(facecolor='red', alpha=0.5), horizontalalignment='center')
if not meter.is_site_meter():
upstream_meter = meter.upstream_meter()
proportion_of_upstream = meter.proportion_of_upstream()
print(meter.instance(), upstream_meter.instance(), proportion_of_upstream)
graph[upstream_meter][meter]["weight"] = proportion_of_upstream*10
graph[upstream_meter][meter]["color"] = "blue"
nx.draw(graph, pos, labels=meter_labels, arrows=False)
def _plot_area(self, ax=None, timeframe=None, pretty_labels=True, unit='W',
label_kwargs=None, plot_kwargs=None, threshold=None,
**load_kwargs):
"""
Parameters
----------
plot_kwargs : dict of key word arguments for DataFrame.plot()
unit : {kW or W}
threshold : float or None
if set to a float then any measured value under this threshold
will be set to 0.
Returns
-------
ax, dataframe
"""
# Get start and end times for the plot
timeframe = self.get_timeframe() if timeframe is None else timeframe
if not timeframe:
return ax
load_kwargs['sections'] = [timeframe]
load_kwargs = self._set_sample_period(timeframe, **load_kwargs)
df = self.dataframe_of_meters(**load_kwargs)
if threshold is not None:
df[df <= threshold] = 0
if unit == 'kW':
df /= 1000
if plot_kwargs is None:
plot_kwargs = {}
df.columns = self.get_labels(df.columns, pretty=pretty_labels)
# Set a tiny linewidth otherwise we get lines even if power is zero
# and this looks ugly when drawn above other lines.
plot_kwargs.setdefault('linewidth', 0.0001)
ax = df.plot(kind='area', **plot_kwargs)
ax.set_ylabel("Power ({:s})".format(unit))
return ax, df
def plot_when_on(self, **load_kwargs):
meter_identifiers = list(self.identifier.meters)
fig, ax = plt.subplots()
for i, meter in enumerate(self.meters):
id_meter = meter.identifier
for chunk_when_on in meter.when_on(**load_kwargs):
series_to_plot = chunk_when_on[chunk_when_on==True]
if len(series_to_plot.index):
(series_to_plot+i-1).plot(ax=ax, style='k.')
labels = self.get_labels(meter_identifiers)
plt.yticks(range(len(self.meters)), labels)
plt.ylim((-0.5, len(self.meters)+0.5))
return ax
def plot_good_sections(self, ax=None, label_func='instance',
include_disabled_meters=True, load_kwargs=None,
**plot_kwargs):
"""
Parameters
----------
label_func : str or None
e.g. 'instance' (default) or 'label'
if None then no labels will be produced.
include_disabled_meters : bool
"""
if ax is None:
ax = plt.gca()
if load_kwargs is None:
load_kwargs = {}
# Prepare list of meters
if include_disabled_meters:
meters = self.all_meters()
else:
meters = self.meters
meters = copy(meters)
meters.sort(key=meter_sorting_key, reverse=True)
n = len(meters)
labels = []
for i, meter in enumerate(meters):
good_sections = meter.good_sections(**load_kwargs)
ax = good_sections.plot(ax=ax, y=i, **plot_kwargs)
del good_sections
if label_func:
labels.append(getattr(meter, label_func)())
# Just end numbers
if label_func is None:
labels = [n] + ([''] * (n-1))
# Y tick formatting
ax.set_yticks(np.arange(0, n) + 0.5)
def y_formatter(y, pos):
try:
label = labels[int(y)]
except IndexError:
label = ''
return label
ax.yaxis.set_major_formatter(FuncFormatter(y_formatter))
ax.set_ylim([0, n])
return ax
def _plot_energy_bar(self, ax=None, mains=None):
"""Plot a stacked bar of the energy per meter, in order.
Parameters
----------
ax : matplotlib axes
mains : MeterGroup or ElecMeter, optional
Used to calculate Remainder.
Returns
-------
ax
"""
energy = self.energy_per_meter(mains=mains, per_period='D',
use_meter_labels=True)
energy.sort(ascending=False)
# Plot
ax = pd.DataFrame(energy).T.plot(kind='bar', stacked=True, grid=True,
edgecolor="none", legend=False, width=2)
ax.set_xticks([])
ax.set_ylabel('kWh\nper\nday', rotation=0, ha='center', va='center',
labelpad=15)
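        # Annotate each stacked segment at its vertical midpoint:
        # cumulative sum minus half of the segment's own height.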
cumsum = energy.cumsum()
text_ys = cumsum - (cumsum.diff().fillna(energy['Remainder']) / 2)
for kwh, (label, y) in zip(energy.values, text_ys.iteritems()):
label += " ({:.2f})".format(kwh)
ax.annotate(label, (0, y), color='white', size=8,
horizontalalignment='center',
verticalalignment='center')
return ax
def plot_multiple(self, axes, meter_keys, plot_func,
kwargs_per_meter=None, pretty_label=True, **kwargs):
"""Create multiple subplots.
Parameters
-----------
axes : list of matplotlib axes objects.
e.g. created using `fix, axes = plt.subplots()`
meter_keys : list of keys for identifying ElecMeters or MeterGroups.
e.g. ['fridge', 'kettle', 4, MeterGroupID, ElecMeterID].
Each element is anything that MeterGroup.__getitem__() accepts.
plot_func : string
Name of function from ElecMeter or Electric or MeterGroup
e.g. `plot_power_histogram`
kwargs_per_meter : dict
Provide key word arguments for the plot_func for each meter.
each key is a parameter name for plot_func
each value is a list (same length as `meters`) for specifying a value for
this parameter for each meter.
e.g. {'range': [(0,100), (0,200)]}
pretty_label : bool
**kwargs : any key word arguments to pass the same values to the
plot func for every meter.
Returns
-------
axes (flattened into a 1D list)
"""
axes = flatten_2d_list(axes)
if len(axes) != len(meter_keys):
raise ValueError("`axes` and `meters` must be of equal length.")
if kwargs_per_meter is None:
kwargs_per_meter = {}
meters = [self[meter_key] for meter_key in meter_keys]
for i, (ax, meter) in enumerate(zip(axes, meters)):
kwargs_copy = deepcopy(kwargs)
for parameter, arguments in kwargs_per_meter.iteritems():
kwargs_copy[parameter] = arguments[i]
getattr(meter, plot_func)(ax=ax, **kwargs_copy)
ax.set_title(meter.label(pretty=pretty_label))
return axes
def sort_meters(self):
"""Sorts meters by instance."""
self.meters.sort(key=meter_sorting_key)
def label(self, **kwargs):
"""
Returns
-------
string : A label listing all the appliance types.
"""
if self.name:
label = self.name
if kwargs.get('pretty'):
label = capitalise_first_letter(label)
return label
return ", ".join(set([meter.label(**kwargs) for meter in self.meters]))
def clear_cache(self):
"""Clear cache on all meters in this MeterGroup."""
for meter in self.meters:
meter.clear_cache()
def correlation_of_sum_of_submeters_with_mains(self, **load_kwargs):
print("Running MeterGroup.correlation_of_sum_of_submeters_with_mains...")
submeters = self.meters_directly_downstream_of_mains()
return self.mains().correlation(submeters, **load_kwargs)
def all_meters(self):
"""Returns a list of self.meters + self.disabled_meters."""
return self.meters + self.disabled_meters
def describe(self, compute_expensive_stats=True, **kwargs):
"""Returns pd.Series describing this MeterGroup."""
series = pd.Series()
all_meters = self.all_meters()
series['total_n_meters'] = len(all_meters)
site_meters = [m for m in all_meters if m.is_site_meter()]
series['total_n_site_meters'] = len(site_meters)
if compute_expensive_stats:
series['correlation_of_sum_of_submeters_with_mains'] = (
self.correlation_of_sum_of_submeters_with_mains(**kwargs))
series['proportion_of_energy_submetered'] = (
self.proportion_of_energy_submetered(**kwargs))
dropout_rates = self._collect_stats_on_all_meters(
kwargs, 'dropout_rate', False)
dropout_rates = np.array(dropout_rates)
series['dropout_rates_ignoring_gaps'] = (
"min={}, mean={}, max={}".format(
dropout_rates.min(),
dropout_rates.mean(),
dropout_rates.max()))
series['mains_sample_period'] = self.mains().sample_period()
series['submeter_sample_period'] = self.submeters().sample_period()
timeframe = self.get_timeframe()
series['timeframe'] = "start={}, end={}".format(timeframe.start, timeframe.end)
series['total_duration'] = str(timeframe.timedelta)
mains_uptime = self.mains().uptime(**kwargs)
series['mains_uptime'] = str(mains_uptime)
try:
series['proportion_uptime'] = (mains_uptime.total_seconds() /
timeframe.timedelta.total_seconds())
except ZeroDivisionError:
series['proportion_uptime'] = np.NaN
series['average_mains_energy_per_day'] = self.mains().average_energy_per_period()
return series
def replace_dataset(identifier, dataset):
"""
Parameters
----------
identifier : ElecMeterID or MeterGroupID
Returns
-------
ElecMeterID or MeterGroupID with dataset replaced with `dataset`
"""
if isinstance(identifier, MeterGroupID):
new_meter_ids = [replace_dataset(id, dataset) for id in identifier.meters]
new_id = MeterGroupID(meters=tuple(new_meter_ids))
elif isinstance(identifier, ElecMeterID):
new_id = identifier._replace(dataset=dataset)
else:
raise TypeError()
return new_id
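# For example (field values below are purely illustrative, assuming the usual
# ElecMeterID named tuple with fields ``instance``, ``building``, ``dataset``):
#
#     replace_dataset(ElecMeterID(instance=1, building=1, dataset='REDD'), 'UK-DALE')
#     -> ElecMeterID(instance=1, building=1, dataset='UK-DALE')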
def iterate_through_submeters_of_two_metergroups(master, slave):
"""
Parameters
----------
master, slave : MeterGroup
Returns
-------
list of 2-tuples of the form (`master_meter`, `slave_meter`)
"""
zipped = []
for master_meter in master.submeters().meters:
slave_identifier = replace_dataset(master_meter.identifier, slave.dataset())
slave_meter = slave[slave_identifier]
zipped.append((master_meter, slave_meter))
return zipped
def combine_chunks_from_generators(index, columns, meters, kwargs):
"""Combines chunks into a single DataFrame.
Adds or averages columns, depending on whether each column is in
PHYSICAL_QUANTITIES_TO_AVERAGE.
Returns
-------
DataFrame
"""
# Regarding columns (e.g. voltage) that we need to average:
# The approach is that we first add everything together
# in the first for-loop, whilst also keeping a
# `columns_to_average_counter` DataFrame
# which tells us what to divide by in order to compute the
# mean for PHYSICAL_QUANTITIES_TO_AVERAGE.
# Regarding doing an in-place addition:
    # We convert our cumulator dataframe to a numpy matrix.
# This allows us to use np.add to do an in-place add.
# If we didn't do this then we'd get horrible memory fragmentation.
# See http://stackoverflow.com/a/27526721/732596
DTYPE = np.float32
cumulator = pd.DataFrame(np.NaN, index=index, columns=columns, dtype=DTYPE)
cumulator_arr = cumulator.as_matrix()
columns_to_average_counter = pd.DataFrame(dtype=np.uint16)
timeframe = None
    # Go through each generator and try to sum the values together
for meter in meters:
print_on_line("\rLoading data for meter", meter.identifier, " ")
kwargs_copy = deepcopy(kwargs)
generator = meter.load(**kwargs_copy)
try:
chunk_from_next_meter = generator.next()
except StopIteration:
continue
del generator
del kwargs_copy
gc.collect()
if chunk_from_next_meter.empty or not chunk_from_next_meter.timeframe:
continue
if timeframe is None:
timeframe = chunk_from_next_meter.timeframe
else:
timeframe = timeframe.union(chunk_from_next_meter.timeframe)
# Add (in-place)
for i, column_name in enumerate(columns):
try:
column = chunk_from_next_meter[column_name]
except KeyError:
continue
aligned = column.reindex(index, copy=False).values
del column
cumulator_col = cumulator_arr[:,i]
where_both_are_nan = np.isnan(cumulator_col) & np.isnan(aligned)
np.nansum([cumulator_col, aligned], axis=0, out=cumulator_col,
dtype=DTYPE)
cumulator_col[where_both_are_nan] = np.NaN
del aligned
del where_both_are_nan
gc.collect()
# Update columns_to_average_counter - this is necessary so we do not
# add up columns like 'voltage' which should be averaged.
physical_quantities = chunk_from_next_meter.columns.get_level_values('physical_quantity')
columns_to_average = (set(PHYSICAL_QUANTITIES_TO_AVERAGE)
.intersection(physical_quantities))
if columns_to_average:
counter_increment = pd.DataFrame(1, columns=columns_to_average,
dtype=np.uint16,
index=chunk_from_next_meter.index)
columns_to_average_counter = columns_to_average_counter.add(
counter_increment, fill_value=0)
del counter_increment
del chunk_from_next_meter
gc.collect()
del cumulator_arr
gc.collect()
# Create mean values by dividing any columns which need dividing
for column in columns_to_average_counter:
cumulator[column] /= columns_to_average_counter[column]
del columns_to_average_counter
gc.collect()
print()
print("Done loading data all meters for this chunk.")
cumulator.timeframe = timeframe
return cumulator
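# The NaN-aware, in-place accumulation above is the heart of this function, so
# here is a minimal, self-contained sketch of the same pattern on plain numpy
# arrays. The helper below is illustrative only and is not used anywhere else.
def _nan_aware_accumulation_sketch():
    """Sketch: add two arrays in place, keeping NaN where *both* inputs are NaN."""
    import numpy as np
    cumulator = np.array([np.nan, 1.0, 2.0, np.nan], dtype=np.float32)
    new_chunk = np.array([np.nan, np.nan, 3.0, 4.0], dtype=np.float32)
    # np.nansum treats an all-NaN pair as 0, so remember those cells first.
    both_nan = np.isnan(cumulator) & np.isnan(new_chunk)
    np.nansum([cumulator, new_chunk], axis=0, out=cumulator, dtype=np.float32)
    cumulator[both_nan] = np.nan
    return cumulator  # array([nan, 1., 5., 4.], dtype=float32)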
meter_sorting_key = lambda meter: meter.instance()
| apache-2.0 |
avmarchenko/exatomic | exatomic/algorithms/displacement.py | 3 | 1635 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Computation of Displacement
############################
"""
import numpy as np
import pandas as pd
def absolute_squared_displacement(universe, ref_frame=None):
"""
    Compute the mean squared displacement per atom, per frame, with respect to the
    reference position.
Computes the squared displacement using the :class:`~exatomic.atom.Atom`
    dataframe. In the case where this dataframe only contains the in-unit-cell
    coordinates, this may not give the desired results.
Args:
universe (:class:`~exatomic.Universe`): The universe containing atomic positions
ref_frame (int): Which frame to use as the reference (default first frame)
    Returns:
df (:class:`~pandas.DataFrame`): Time dependent displacement per atom
"""
index = 0
if ref_frame is None:
ref_frame = universe.frame.index[index]
else:
frames = universe.frame.index.values
ref_frame = np.where(frames == ref_frame)
if 'label' not in universe.atom.columns:
universe.atom['label'] = universe.atom.get_atom_labels()
groups = universe.atom.groupby('label')
msd = np.empty((groups.ngroups, ), dtype='O')
for i, (_, group) in enumerate(groups):
xyz = group[['x', 'y', 'z']].values
msd[i] = ((xyz - xyz[0])**2).sum(axis=1)
df = pd.DataFrame.from_records(msd).T
df.index = universe.frame.index.copy()
df.columns = universe.atom['label'].unique()
return df
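# The groupby/reference-frame logic above can be seen in isolation on a tiny
# synthetic trajectory (plain pandas/numpy only; the frame, label and coordinate
# values below are illustrative and are not taken from any exatomic dataset):
def _squared_displacement_sketch():
    """Sketch: per-atom squared displacement relative to the first frame."""
    atom = pd.DataFrame({
        'frame': [0, 0, 1, 1],           # two frames ...
        'label': [0, 1, 0, 1],           # ... each containing two atoms
        'x': [0.0, 1.0, 3.0, 1.0],
        'y': [0.0, 0.0, 4.0, 0.0],
        'z': [0.0, 0.0, 0.0, 2.0]})
    sq_disp = {}
    for label, group in atom.groupby('label'):
        xyz = group[['x', 'y', 'z']].values
        sq_disp[label] = ((xyz - xyz[0])**2).sum(axis=1)
    # atom 0 moved by (3, 4, 0) -> 25; atom 1 moved by (0, 0, 2) -> 4
    return pd.DataFrame(sq_disp)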
| apache-2.0 |
saiwing-yeung/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
pprett/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 63 | 3231 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
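# A small sketch of what the feature map above gives in practice: for a large
# enough number of components, Z.dot(Z.T) approaches the exact RBF kernel
# matrix. The helper below is for illustration only and is not part of the
# public API of this module.
def _rbf_sampler_sketch(gamma=1.0, n_components=2000, seed=0):
    """Compare the Monte Carlo feature map with the exact RBF kernel."""
    from sklearn.metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(seed)
    X = rng.normal(size=(20, 5))
    sampler = RBFSampler(gamma=gamma, n_components=n_components,
                         random_state=seed)
    Z = sampler.fit_transform(X)
    approximate = np.dot(Z, Z.T)              # approximate kernel matrix
    exact = rbf_kernel(X, gamma=gamma)        # exact kernel matrix
    return np.abs(approximate - exact).max()  # shrinks as n_components grows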
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
            shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or a sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
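# A quick sketch of the output shape discussed in the class docstring: each
# input feature becomes 2 * sample_steps - 1 output features (one zeroth-order
# term plus a cos/sin pair per additional sampling step). Illustrative helper
# only, not part of the module API.
def _additive_chi2_shape_sketch():
    X = np.random.RandomState(0).rand(4, 3)  # uniform in [0, 1): non-negative
    for steps in (1, 2, 3):
        X_new = AdditiveChi2Sampler(sample_steps=steps).fit_transform(X)
        assert X_new.shape == (4, 3 * (2 * steps - 1))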
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
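# A short sketch of the limiting behaviour of the approximation above: when
# every training point is used as a basis point, the embedding reproduces the
# exact kernel matrix up to numerical error. Illustrative helper only, not part
# of the module API.
def _nystroem_sketch(seed=0):
    from sklearn.metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(seed)
    X = rng.normal(size=(30, 4))
    nystroem = Nystroem(kernel='rbf', gamma=0.5, n_components=30,
                        random_state=seed)
    embedding = nystroem.fit_transform(X)
    reconstructed = np.dot(embedding, embedding.T)
    return np.abs(reconstructed - rbf_kernel(X, gamma=0.5)).max()  # close to 0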
| bsd-3-clause |
adrn/streams | streams/io/tests/test_lm10.py | 1 | 4462 | # coding: utf-8
"""
Make sure the satellite starting position coincides with the particles
"""
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
from astropy.constants import G
import matplotlib.pyplot as plt
import numpy as np
import pytest
from ... import usys
from ..lm10 import LM10Simulation
plot_path = "plots/tests/io/lm10"
if not os.path.exists(plot_path):
os.makedirs(plot_path)
lm10 = LM10Simulation()
particles = lm10.particles(expr="Pcol==-1")
particles = particles.decompose(usys)
satellite = lm10.satellite()
satellite = satellite.decompose(usys)
# Here are the true parameters from the last block in R601LOG
GG = G.decompose(bases=[u.kpc,u.M_sun,u.Myr]).value
X = (GG / 0.85**3 * 6.4E8)**-0.5
length_unit = u.Unit("0.85 kpc")
mass_unit = u.Unit("6.4E8 M_sun")
time_unit = u.Unit("{:08f} Myr".format(X))
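# The time unit above follows from requiring G = 1 in simulation units:
# with [L] = 0.85 kpc and [M] = 6.4e8 M_sun, [T] = sqrt([L]**3 / (G * [M])),
# which is exactly the expression used for X (evaluated in Myr).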
r0 = np.array([[2.3279727753E+01,2.8190329987,-6.8798148785]])*length_unit
v0 = np.array([[3.9481694047,-6.1942673069E-01,3.4555581435]])*length_unit/time_unit
law_r = np.squeeze(r0.decompose(usys).value)
law_v = np.squeeze(v0.decompose(usys).value)
p_kwargs = dict(marker='.', linestyle='none', color='k', alpha=0.1)
s_kwargs = dict(marker='o', linestyle='none', color='r', alpha=0.75,
markersize=10)
l_kwargs = dict(marker='^', linestyle='none', color='g', alpha=0.75,
markersize=10)
def test_position():
fig,axes = plt.subplots(2, 2, figsize=(10,10))
axes[0,1].set_visible(False)
axes[0,0].plot(particles["x"].value,
particles["y"].value,
label="all particles", **p_kwargs)
axes[1,0].plot(particles["x"].value,
particles["z"].value,
**p_kwargs)
axes[1,1].plot(particles["y"].value,
particles["z"].value,
**p_kwargs)
axes[0,0].plot(satellite["x"].value,
satellite["y"].value,
label="Satellite", **s_kwargs)
axes[1,0].plot(satellite["x"].value,
satellite["z"].value,
**s_kwargs)
axes[1,1].plot(satellite["y"].value,
satellite["z"].value,
**s_kwargs)
axes[0,0].plot(law_r[0], law_r[1], label="Law", **l_kwargs)
axes[1,0].plot(law_r[0], law_r[2], **l_kwargs)
axes[1,1].plot(law_r[1], law_r[2], **l_kwargs)
sz = 2
axes[0,0].set_xlim(law_r[0]-sz, law_r[0]+sz)
axes[0,0].set_ylim(law_r[1]-sz, law_r[1]+sz)
axes[1,0].set_xlim(law_r[0]-sz, law_r[0]+sz)
axes[1,0].set_ylim(law_r[2]-sz, law_r[2]+sz)
axes[1,1].set_xlim(law_r[1]-sz, law_r[1]+sz)
axes[1,1].set_ylim(law_r[2]-sz, law_r[2]+sz)
axes[0,0].legend(fontsize=10)
fig.subplots_adjust(hspace=0.02,wspace=0.02)
fig.savefig(os.path.join(plot_path, "sat_ptcl_positions.png"))
def test_velocity():
fig,axes = plt.subplots(2, 2, figsize=(10,10))
axes[0,1].set_visible(False)
axes[0,0].plot(particles["vx"].value,
particles["vy"].value,
label="all particles", **p_kwargs)
axes[1,0].plot(particles["vx"].value,
particles["vz"].value,
**p_kwargs)
axes[1,1].plot(particles["vy"].value,
particles["vz"].value,
**p_kwargs)
axes[0,0].plot(satellite["vx"].value,
satellite["vy"].value,
label="Satellite", **s_kwargs)
axes[1,0].plot(satellite["vx"].value,
satellite["vz"].value,
**s_kwargs)
axes[1,1].plot(satellite["vy"].value,
satellite["vz"].value,
**s_kwargs)
axes[0,0].plot(law_v[0], law_v[1], label="Law", **l_kwargs)
axes[1,0].plot(law_v[0], law_v[2], **l_kwargs)
axes[1,1].plot(law_v[1], law_v[2], **l_kwargs)
sz = (50*u.km/u.s).decompose(usys).value
axes[0,0].set_xlim(law_v[0]-sz, law_v[0]+sz)
axes[0,0].set_ylim(law_v[1]-sz, law_v[1]+sz)
axes[1,0].set_xlim(law_v[0]-sz, law_v[0]+sz)
axes[1,0].set_ylim(law_v[2]-sz, law_v[2]+sz)
axes[1,1].set_xlim(law_v[1]-sz, law_v[1]+sz)
axes[1,1].set_ylim(law_v[2]-sz, law_v[2]+sz)
axes[0,0].legend(fontsize=10)
fig.subplots_adjust(hspace=0.02,wspace=0.02)
fig.savefig(os.path.join(plot_path, "sat_ptcl_velocities.png")) | mit |
YinongLong/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
nowls/gnuradio | gr-filter/examples/decimate.py | 58 | 6061 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = filter.firdes.low_pass_2(1, self._fs,
200, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._decim
print "Taps per channel: ", tpc
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = blocks.add_cc()
freqs = [10, 20, 2040]
for i in xrange(len(freqs)):
self.signals.append(analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = filter.pfb.decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = filter.fir_filter_ccf(self._decim, self._taps)
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
        # Create the sink for the decimated signal
self.snk = blocks.vector_sink_c()
self.connect(self.pfb, self.snk)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(16,9))
fig2 = pylab.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
sandeepgupta2k4/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans.py | 34 | 10130 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of k-means clustering on top of tf.learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.summary import summary
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.session_run_hook import SessionRunArgs
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, tolerance):
"""Initializes _LossRelativeChangeHook.
Args:
tolerance: A relative tolerance of change between iterations.
"""
self._tolerance = tolerance
self._prev_loss = None
def begin(self):
self._loss_tensor = ops.get_default_graph().get_tensor_by_name(
KMeansClustering.LOSS_OP_NAME + ':0')
assert self._loss_tensor is not None
def before_run(self, run_context):
del run_context
return SessionRunArgs(
fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor})
def after_run(self, run_context, run_values):
loss = run_values.results[KMeansClustering.LOSS_OP_NAME]
assert loss is not None
if self._prev_loss is not None:
relative_change = (abs(loss - self._prev_loss) /
(1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
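# Worked example of the stopping rule above (numbers are purely illustrative):
# with self._prev_loss == 100.0 and a new loss of 99.8,
#   relative_change = abs(99.8 - 100.0) / (1 + abs(100.0)) ~= 0.00198,
# so a hook constructed with tolerance=0.01 would request a stop at this step.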
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_tensor_or_dict(features):
"""Helper function to parse features."""
if isinstance(features, dict):
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
features = array_ops.concat([features[k] for k in keys], 1)
return features
def _kmeans_clustering_model_fn(features, labels, mode, params, config):
"""Model function for KMeansClustering estimator."""
assert labels is None, labels
(all_scores, model_predictions, losses,
is_initialized, init_op, training_op) = clustering_ops.KMeans(
_parse_tensor_or_dict(features),
params.get('num_clusters'),
initial_clusters=params.get('training_initial_clusters'),
distance_metric=params.get('distance_metric'),
use_mini_batch=params.get('use_mini_batch'),
mini_batch_steps_per_iteration=params.get(
'mini_batch_steps_per_iteration'),
random_seed=params.get('random_seed'),
kmeans_plus_plus_num_retries=params.get(
'kmeans_plus_plus_num_retries')).training_graph()
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses, name=KMeansClustering.LOSS_OP_NAME)
summary.scalar('loss/raw', loss)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
KMeansClustering.ALL_SCORES: all_scores[0],
KMeansClustering.CLUSTER_IDX: model_predictions[0],
}
eval_metric_ops = {KMeansClustering.SCORES: loss}
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
relative_tolerance = params.get('relative_tolerance')
if relative_tolerance is not None:
training_hooks.append(_LossRelativeChangeHook(relative_tolerance))
return ModelFnOps(
mode=mode,
predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss,
train_op=training_op,
training_hooks=training_hooks)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering."""
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
SCORES = 'scores'
CLUSTER_IDX = 'cluster_idx'
CLUSTERS = 'clusters'
ALL_SCORES = 'all_scores'
LOSS_OP_NAME = 'kmeans_loss'
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None):
"""Creates a model for running KMeans training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
initial_clusters: specifies how to initialize the clusters for training.
See clustering_ops.kmeans for the possible values.
distance_metric: the distance metric used for clustering.
See clustering_ops.kmeans for the possible values.
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: If true, use the mini-batch k-means algorithm. Else assume
full batch.
mini_batch_steps_per_iteration: number of steps after which the updated
cluster centers are synced back to a master copy. See clustering_ops.py
for more details.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample O(log(num_to_sample)) additional points.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
Note that this may not work correctly if use_mini_batch=True.
config: See Estimator
"""
params = {}
params['num_clusters'] = num_clusters
params['training_initial_clusters'] = initial_clusters
params['distance_metric'] = distance_metric
params['random_seed'] = random_seed
params['use_mini_batch'] = use_mini_batch
params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration
params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries
params['relative_tolerance'] = relative_tolerance
super(KMeansClustering, self).__init__(
model_fn=_kmeans_clustering_model_fn,
params=params,
model_dir=model_dir,
config=config)
def predict_cluster_idx(self, input_fn=None):
"""Yields predicted cluster indices."""
key = KMeansClustering.CLUSTER_IDX
results = super(KMeansClustering, self).predict(
input_fn=input_fn, outputs=[key])
for result in results:
yield result[key]
def score(self, input_fn=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
return np.sum(
self.evaluate(
input_fn=input_fn, steps=steps)[KMeansClustering.SCORES])
def transform(self, input_fn=None, as_iterable=False):
"""Transforms each element to distances to cluster centers.
Note that this function is different from the corresponding one in sklearn.
For SQUARED_EUCLIDEAN distance metric, sklearn transform returns the
EUCLIDEAN distance, while this function returns the SQUARED_EUCLIDEAN
distance.
Args:
input_fn: see predict.
as_iterable: see predict
Returns:
Array with same number of rows as x, and num_clusters columns, containing
distances to the cluster centers.
"""
key = KMeansClustering.ALL_SCORES
results = super(KMeansClustering, self).predict(
input_fn=input_fn,
outputs=[key],
as_iterable=as_iterable)
if not as_iterable:
return results[key]
else:
return results
def clusters(self):
"""Returns cluster centers."""
return super(KMeansClustering, self).get_variable_value(self.CLUSTERS)
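# A short usage sketch (data, shapes and step counts are illustrative; ``fit``
# below follows the generic tf.contrib.learn Estimator interface assumed here):
#
#   points = np.random.rand(1000, 2).astype(np.float32)
#   input_fn = lambda: (ops.convert_to_tensor(points), None)
#   kmeans = KMeansClustering(num_clusters=5, relative_tolerance=1e-4)
#   kmeans.fit(input_fn=input_fn, steps=100)
#   centers = kmeans.clusters()
#   assignments = list(kmeans.predict_cluster_idx(input_fn=input_fn))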
| apache-2.0 |
mughanibu/Deep-Learning-for-Inverse-Problems | PLOT.py | 1 | 2433 | import matplotlib.pyplot as plt
import pickle, glob
import numpy as np
import sys
psnr_prefix = './psnr/*'
psnr_paths = sorted(glob.glob(psnr_prefix))
psnr_means = {}
def filter_by_scale(row, scale):
return row[-1]==scale
for i, psnr_path in enumerate(psnr_paths):
print ""
print psnr_path
psnr_dict = None
epoch = str(i)#psnr_path.split("_")[-1]
with open(psnr_path, 'rb') as f:
psnr_dict = pickle.load(f)
dataset_keys = psnr_dict.keys()
for j, key in enumerate(dataset_keys):
print 'dataset', key
psnr_list = psnr_dict[key]
psnr_np = np.array(psnr_list)
psnr_np_2 = psnr_np[np.array([filter_by_scale(row,2) for row in psnr_np])]
psnr_np_3 = psnr_np[np.array([filter_by_scale(row,3) for row in psnr_np])]
psnr_np_4 = psnr_np[np.array([filter_by_scale(row,4) for row in psnr_np])]
print "x2:",np.mean(psnr_np_2, axis=0).tolist()
print "x3:",np.mean(psnr_np_3, axis=0).tolist()
print "x4:",np.mean(psnr_np_4, axis=0).tolist()
mean_2 = np.mean(psnr_np_2, axis=0).tolist()
mean_3 = np.mean(psnr_np_3, axis=0).tolist()
mean_4 = np.mean(psnr_np_4, axis=0).tolist()
psnr_mean = [mean_2, mean_3, mean_4]
#print 'psnr mean', psnr_mean
if psnr_means.has_key(key):
psnr_means[key][epoch] = psnr_mean
else:
psnr_means[key] = {epoch: psnr_mean}
#sys.exit(1)
keys = psnr_means.keys()
for i, key in enumerate(keys):
psnr_dict = psnr_means[key]
epochs = sorted(psnr_dict.keys())
x_axis = []
bicub_mean = []
vdsr_mean_2 = []
vdsr_mean_3 = []
vdsr_mean_4 = []
for epoch in epochs:
print epoch
print psnr_dict[epoch]
x_axis.append(int(epoch))
bicub_mean.append(psnr_dict[epoch][0][0])
vdsr_mean_2.append(psnr_dict[epoch][0][1])
vdsr_mean_3.append(psnr_dict[epoch][1][1])
vdsr_mean_4.append(psnr_dict[epoch][2][1])
plt.figure(i)
print key
print len(x_axis), len(bicub_mean), len(vdsr_mean_2)
print vdsr_mean_2
print "x2", np.argmax(vdsr_mean_2), np.max(vdsr_mean_2)
print "x3", np.argmax(vdsr_mean_3), np.max(vdsr_mean_3)
print "x4", np.argmax(vdsr_mean_4), np.max(vdsr_mean_4)
lines_bicub = plt.plot(vdsr_mean_2, 'g')
lines_bicub = plt.plot(vdsr_mean_4, 'b', vdsr_mean_3, 'y')
plt.setp(lines_bicub, linewidth=3.0)
#plt.show()
"""
psnr_means :
{
'DATASET_NAME' :
{
'EPOCH' : [bicubic psnr, vdsr psnr]
}
'DATASET_NAME_2':
{
'EPOCH' : [bicubic psnr, vdsr psnr]
}
...
}
"""
for i, psnr_path in enumerate(psnr_paths):
print i, psnr_path
| mit |
mschmidt87/nest-simulator | extras/ConnPlotter/examples/connplotter_tutorial.py | 18 | 27730 | # -*- coding: utf-8 -*-
#
# connplotter_tutorial.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# !========================
# ! ConnPlotter: A Tutorial
# !========================
# !
# ! :Author: Hans Ekkehard Plesser
# ! :Institution: Norwegian University of Life Sciences, Simula
# ! Research Laboratory, RIKEN Brain Sciences Institute
# ! :Version: 0.7
# ! :Date: 1 December 2009
# ! :Copyright: Hans Ekkehard Plesser
# ! :License: Creative Commons Attribution-Noncommercial-Share Alike License
# ! v 3.0
# !
# ! :Note: For best results, you should run this script with PyReport by
# ! Gael Varoquaux, available from
# ! http://gael-varoquaux.info/computers/pyreport/
# !
# ! Please set using_pyreport to True if you want to run the
# ! script through pyreport. Otherwise, figures will not be captured
# ! correctly.
using_pyreport = False
# ! Introduction
# !=============
# ! This tutorial gives a brief introduction to the ConnPlotter
# ! toolbox. It is by no means complete.
# ! Avoid interactive backend when using pyreport
if using_pyreport:
import matplotlib
matplotlib.use("Agg")
# ! Import pylab to call pylab.show() so that pyreport
# ! can capture figures created. Must come before import
# ! ConnPlotter so we get the correct show().
import pylab
# ! If not using pyreport, disable pylab.show() until we reach end of script
if not using_pyreport:
pylab_show = pylab.show
def nop(s=None):
pass
pylab.show = nop
# ! Import ConnPlotter and its examples
import ConnPlotter as cpl
import ConnPlotter.examples as ex
# ! Turn off warnings about resized figure windows
import warnings
warnings.simplefilter("ignore")
# ! Define a helper function to show LaTeX tables on the fly
def showTextTable(connPattern, fileTrunk):
"""
Shows a Table of Connectivity as textual table.
Arguments:
connPattern ConnectionPattern instance
fileTrunk Eventual PNG image will be fileTrunk.png
"""
import subprocess as subp # to call LaTeX etc
import os # to remove files
# Write to LaTeX file so we get a nice textual representation
# We want a complete LaTeX document, so we set ``standalone``
# to ``True``.
connPattern.toLaTeX(file=fileTrunk + '.tex', standalone=True,
enumerate=True)
# Create PDF, crop, and convert to PNG
try:
devnull = open('/dev/null', 'w')
subp.call(['pdflatex', fileTrunk], stdout=devnull, stderr=subp.STDOUT)
# need wrapper, since pdfcrop does not begin with #!
subp.call(['pdfcrop ' + fileTrunk + '.pdf ' + fileTrunk + '-crop.pdf'],
shell=True,
stdout=devnull, stderr=subp.STDOUT)
devnull.close()
os.rename(fileTrunk + '-crop.pdf', fileTrunk + '.pdf')
for suffix in ('.tex', '-crop.pdf', '.png', '.aux', '.log'):
if os.path.exists(fileTrunk + suffix):
os.remove(fileTrunk + suffix)
except:
raise Exception('Could not create PDF Table.')
# ! Simple network
# ! ==============
# ! This is a simple network with two layers A and B; layer B has two
# ! populations, E and I. On the NEST side, we use only synapse type
# ! ``static_synapse``. ConnPlotter then infers that synapses with positive
# ! weights should have type ``exc``, those with negative weight type ``inh``.
# ! Those two types are know to ConnPlotter.
# ! Obtain layer, connection and model list from the example set
s_layer, s_conn, s_model = ex.simple()
# ! Create Connection Pattern representation
s_cp = cpl.ConnectionPattern(s_layer, s_conn)
# ! Show pattern as textual table (we cheat a little and include PDF directly)
showTextTable(s_cp, 'simple_tt')
# $ \centerline{\includegraphics{simple_tt.pdf}}
# ! Show pattern in full detail
# ! ---------------------------
# ! A separate patch is shown for each pair of populations.
# !
# ! - Rows represent senders, columns targets.
# ! - Layer names are given to the left/above, population names to the right
# ! and below.
# ! - Excitatory synapses shown in red, inhibitory in blue.
# ! - Each patch has its own color scale.
s_cp.plot()
pylab.show()
# ! Let us take a look at what this connection pattern table shows:
# !
# ! - The left column, with header "A", is empty: The "A" layer receives
# ! no input.
# ! - The right column shows input to layer "B"
# !
# ! * The top row, labeled "A", has two patches in the "B" column:
# !
# ! + The left patch shows relatively focused input to the "E" population
# ! in layer "B" (first row of "Connectivity" table).
# ! + The right patch shows wider input to the "I" population in layer
# ! "B" (second row of "Connectivity" table).
# ! + Patches are red, indicating excitatory connections.
# !      + In both cases, masks are circular, and the product of connection
# ! weight and probability is independent of the distance between sender
# ! and target neuron.
# !
# ! * The grey rectangle to the bottom right shows all connections from
# ! layer "B" populations to layer "B" populations. It is subdivided into
# ! two rows and two columns:
# !
# ! + Left column: inputs to the "E" population.
# ! + Right column: inputs to the "I" population.
# ! + Top row: projections from the "E" population.
# ! + Bottom row: projections from the "I" population.
# ! + There is only one type of synapse for each sender-target pair,
# ! so there is only a single patch per pair.
# ! + Patches in the top row, from population "E" show excitatory
# ! connections, thus they are red.
# ! + Patches in the bottom row, from population "I" show inhibitory
# ! connections, thus they are blue.
# ! + The patches in detail are:
# !
# ! - **E to E** (top-left, row 3+4 in table): two rectangular
# ! projections at 90 degrees.
# ! - **E to I** (top-right, row 5 in table): narrow gaussian projection.
# ! - **I to E** (bottom-left, row 6 in table): wider gaussian projection
# ! - **I to I** (bottom-right, row 7 in table): circular projection
# ! covering entire layer.
# !
# ! - **NB:** Color scales are different, so one **cannot** compare connection
# ! strengths between patches.
# ! Full detail, common color scale
# ! -------------------------------
s_cp.plot(globalColors=True)
pylab.show()
# ! This figure shows the same data as the one above, but now all patches use
# ! a common color scale, so full intensity color (either red or blue)
# ! indicates the strongest connectivity. From this we see that
# !
# ! - A to B/E is stronger than A to B/I
# ! - B/E to B/I is the strongest of all connections at the center
# ! - B/I to B/E is stronger than B/I to B/I
# ! Aggregate by groups
# ! -------------------
# ! For each pair of population groups, sum connections of the same type
# ! across populations.
s_cp.plot(aggrGroups=True)
pylab.show()
# ! In the figure above, all excitatory connections from B to B layer have been
# ! combined into one patch, as have all inhibitory connections from B to B.
# ! In the upper-right corner, all connections from layer A to layer B have
# ! been combined; the patch for inhibitory connections is missing, as there
# ! are none.
# ! Aggregate by groups and synapse models
# ! --------------------------------------
s_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! When aggregating across synapse models, excitatory and inhibitory
# ! connections are combined. By default, excitatory connections are weighted
# ! with +1, inhibitory connections with -1 in the sum. This may yield kernels
# ! with positive and negative values. They are shown on a red-white-blue scale
# ! as follows:
# !
# ! - White always represents 0.
# ! - Positive values are represented by increasingly saturated red.
# ! - Negative values are represented by increasingly saturated blue.
# ! - Colorscales are separate for red and blue:
# !
# ! * largest positive value: fully saturated red
# ! * largest negative value: fully saturated blue
# !
# ! - Each patch has its own colorscales.
# ! - When ``aggrSyns=True`` is combined with ``globalColors=True``,
# ! all patches use the same minimum and maximum in their red and blue
# !   color scales. The minimum is the negative of the maximum, so that
# !   blue and red intensities can be compared.
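# ! As a simple numeric illustration of the weighting above (values chosen
# ! purely for illustration): if at some grid point the excitatory kernel is
# ! 0.8 and the inhibitory kernel is 0.3, the aggregated value is
# ! (+1)*0.8 + (-1)*0.3 = 0.5, i.e. a moderately saturated red.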
s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True)
pylab.show()
# ! - We can explicitly set the limits of the color scale; if values exceeding
# ! the limits are present, this is indicated by an arrowhead at the end of
# ! the colorbar. User-defined color limits need not be symmetric about 0.
s_cp.plot(aggrGroups=True, aggrSyns=True, globalColors=True,
colorLimits=[-2, 3])
pylab.show()
# ! Save pattern to file
# ! --------------------
# s_cp.plot(file='simple_example.png')
# ! This saves the detailed diagram to the given file. If you want to save
# ! the pattern in several file formats, you can pass a tuple of file names,
# ! e.g., ``s_cp.plot(file=('a.eps', 'a.png'))``.
# !
# ! **NB:** Saving directly to PDF may lead to files with artifacts. We
# ! recommend to save to EPS and the convert to PDF.
# ! Build network in NEST
# ! ---------------------
import nest
import nest.topology as topo
# ! Create models
for model in s_model:
nest.CopyModel(model[0], model[1], model[2])
# ! Create layers, store layer info in Python variable
for layer in s_layer:
exec ('%s = topo.CreateLayer(layer[1])' % layer[0])
# ! Create connections, need to insert variable names
for conn in s_conn:
eval('topo.ConnectLayers(%s,%s,conn[2])' % (conn[0], conn[1]))
nest.Simulate(10)
# ! **Ooops:** Nothing happened? Well, it did, but pyreport cannot capture the
# ! output directly generated by NEST. The absence of an error message in this
# ! place shows that network construction and simulation went through.
# ! Inspecting the connections actually created
# ! :::::::::::::::::::::::::::::::::::::::::::
# ! The following block of messy and makeshift code plots the targets of the
# ! center neuron of the B/E population in the B/E and the B/I populations.
B_top = nest.GetStatus(B, 'topology')[0]
ctr_id = topo.GetElement(B,
                         [int(B_top['rows'] / 2), int(B_top['columns'] / 2)])
# find excitatory element in B
E_id = [gid for gid in ctr_id
if nest.GetStatus([gid], 'model')[0] == 'E']
# get all targets, split into excitatory and inhibitory
alltgts = nest.GetStatus(
nest.GetConnections(E_id, synapse_model='static_synapse'), 'target')
Etgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'E']
Itgts = [t for t in alltgts if nest.GetStatus([t], 'model')[0] == 'I']
# obtain positions of targets
Etpos = tuple(zip(*topo.GetPosition(Etgts)))
Itpos = tuple(zip(*topo.GetPosition(Itgts)))
# plot excitatory
pylab.clf()
pylab.subplot(121)
pylab.scatter(Etpos[0], Etpos[1])
ctrpos = pylab.array(topo.GetPosition(E_id)[0])
ax = pylab.gca()
ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder=99,
fc='r', alpha=0.4, ec='none'))
ax.add_patch(
pylab.Rectangle(ctrpos + pylab.array((-0.4, -0.2)), 0.8, 0.4, zorder=1,
fc='none', ec='r', lw=3))
ax.add_patch(
pylab.Rectangle(ctrpos + pylab.array((-0.2, -0.4)), 0.4, 0.8, zorder=1,
fc='none', ec='r', lw=3))
ax.add_patch(
pylab.Rectangle(ctrpos + pylab.array((-0.5, -0.5)), 1.0, 1.0, zorder=1,
fc='none', ec='k', lw=3))
ax.set(aspect='equal', xlim=[-0.5, 0.5], ylim=[-0.5, 0.5],
xticks=[], yticks=[])
# plot inhibitory
pylab.subplot(122)
pylab.scatter(Itpos[0], Itpos[1])
ctrpos = topo.GetPosition(E_id)[0]
ax = pylab.gca()
ax.add_patch(pylab.Circle(ctrpos, radius=0.02, zorder=99,
fc='r', alpha=0.4, ec='none'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.1, zorder=2,
fc='none', ec='r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.2, zorder=2,
fc='none', ec='r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.3, zorder=2,
fc='none', ec='r', lw=2, ls='dashed'))
ax.add_patch(pylab.Circle(ctrpos, radius=0.5, zorder=2,
fc='none', ec='r', lw=3))
ax.add_patch(pylab.Rectangle((-0.5, -0.5), 1.0, 1.0, zorder=1,
fc='none', ec='k', lw=3))
ax.set(aspect='equal', xlim=[-0.5, 0.5], ylim=[-0.5, 0.5],
xticks=[], yticks=[])
pylab.show()
# ! Thick red lines mark the mask; the dashed red lines in the right panel mark one,
# ! two and three standard deviations. The sender location is marked by the red spot
# ! in the center. Layers are 40x40 in size.
# ! A more complex network
# ! ======================
# !
# ! This network has layers A and B, with E and I populations in B. The added
# ! complexity comes from the fact that we now have four synapse types: AMPA,
# ! NMDA, GABA_A and GABA_B. These synapse types are known to ConnPlotter.
# ! Setup and tabular display
c_layer, c_conn, c_model = ex.complex()
c_cp = cpl.ConnectionPattern(c_layer, c_conn)
showTextTable(c_cp, 'complex_tt')
# $ \centerline{\includegraphics{complex_tt.pdf}}
# ! Pattern in full detail
# ! ----------------------
c_cp.plot()
pylab.show()
# ! Note the following differences to the simple pattern case:
# !
# ! - For each pair of populations, e.g., B/E as sender and B/E as target,
# !   we now have two patches representing AMPA and NMDA synapses for the E
# !   population, GABA_A and GABA_B for the I population.
# ! - Colors are as follows:
# !
# ! :AMPA: red
# ! :NMDA: orange
# ! :GABA_A: blue
# ! :GABA_B: purple
# ! - Note that the horizontal rectangular pattern (table line 3) describes
# ! AMPA synapses, while the vertical rectangular pattern (table line 4)
# ! describes NMDA synapses.
# ! Full detail, common color scale
# ! -------------------------------
c_cp.plot(globalColors=True)
pylab.show()
# ! As above, but now with a common color scale.
# ! **NB:** The patch for the B/I to B/I connection may look empty, but it
# ! actually shows a very light shade of red. Rules are as follows:
# !
# ! - If there is no connection between two populations, show the grey layer
# ! background.
# ! - All parts of the target layer that are outside the mask or strictly zero
# ! are off-white.
# ! - If it looks bright white, it is a very diluted shade of the color for the
# !   pertaining synapse type.
# ! Full detail, explicit color limits
# ! ----------------------------------
c_cp.plot(colorLimits=[0, 1])
pylab.show()
# ! As above, but the common color scale is now given explicitly.
# ! The arrow at the right end of the color scale indicates that the values
# ! in the kernels extend beyond +1.
# ! Aggregate by synapse models
# ! -----------------------------
# ! For each population pair, connections are summed across
# ! synapse models.
# !
# ! - Excitatory kernels are weighted with +1, inhibitory kernels with -1.
# ! - The resulting kernels are shown on a color scale ranging from red
# ! (inhibitory) via white (zero) to blue (excitatory).
# ! - Each patch has its own color scale
c_cp.plot(aggrSyns=True)
pylab.show()
# !
# ! - AMPA and NMDA connections from B/E to B/E are now combined to form a
# ! cross.
# ! - GABA_A and GABA_B connections from B/I to B/E are two concentric spots.
# ! Aggregate by population group
# ! ------------------------------
c_cp.plot(aggrGroups=True)
pylab.show()
# ! This is in many ways orthogonal to aggregation by synapse model:
# ! We keep synapse types separate, while we combine across populations. Thus,
# ! we have added the horizontal bar (B/E to B/E, row 3) with the spot
# ! (B/E to B/I, row 5).
# ! Aggregate by population group and synapse model
# ! -----------------------------------------------------------------
c_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! All connections are combined for each pair of sender/target layers.
# ! CPTs using the total charge deposited (TCD) as intensity
# ! -----------------------------------------------------------
# ! TCD-based CPTs are currently only available for the ht_neuron, since
# ! ConnPlotter does not know how to obtain \int g(t) dt from NEST for other
# ! conductance-based model neurons.
# ! We need to create a separate ConnectionPattern instance for each membrane
# ! potential we want to use in the TCD computation
c_cp_75 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd',
mList=c_model, Vmem=-75.0)
c_cp_45 = cpl.ConnectionPattern(c_layer, c_conn, intensity='tcd',
mList=c_model, Vmem=-45.0)
# ! In order to obtain a meaningful comparison between both membrane
# ! potentials, we use the same global color scale.
# ! V_m = -75 mV
# ! ::::::::::::::
c_cp_75.plot(colorLimits=[0, 150])
pylab.show()
# ! V_m = -45 mV
# ! ::::::::::::::
c_cp_45.plot(colorLimits=[0, 150])
pylab.show()
# ! Note that the NMDA projection virtually vanishes for V_m=-75mV, but is very
# ! strong for V_m=-45mV. GABA_A and GABA_B projections are also stronger,
# ! while AMPA is weaker for V_m=-45mV.
# ! Non-Dale network model
# ! ======================
# ! By default, ConnPlotter assumes that networks follow Dale's law, i.e.,
# ! either make excitatory or inhibitory connections. If this assumption
# ! is violated, we need to inform ConnPlotter how synapse types are grouped.
# ! We look at a simple example here.
# ! Load model
nd_layer, nd_conn, nd_model = ex.non_dale()
# ! We specify the synapse configuration using the synTypes argument:
# !
# ! - synTypes is a tuple.
# ! - Each element in the tuple represents a group of synapse models
# ! - Any sender can make connections with synapses from **one group only**.
# ! - Each synapse model is specified by a ``SynType``.
# ! - The SynType constructor takes three arguments:
# !
# ! * The synapse model name
# !   * The weight to apply when aggregating across synapse models
# ! * The color to use for the synapse type
# !
# ! - Synapse names must be unique, and must form a superset of all synapse
# ! models in the network.
nd_cp = cpl.ConnectionPattern(nd_layer, nd_conn, synTypes=(
(cpl.SynType('exc', 1.0, 'b'), cpl.SynType('inh', -1.0, 'r')),))
showTextTable(nd_cp, 'non_dale_tt')
# $ \centerline{\includegraphics{non_dale_tt.pdf}}
nd_cp.plot()
pylab.show()
# ! Note that we now have red and blue patches side by side, as the same
# ! population can make excitatory and inhibitory connections.
# ! Configuring the ConnectionPattern display
# ! =========================================
# ! I will now show you a few ways in which you can configure how ConnPlotter
# ! shows connection patterns.
# ! User defined synapse types
# ! --------------------------
# !
# ! By default, ConnPlotter knows the following two sets of synapse types.
# !
# ! exc/inh
# ! - Used automatically when all connections have the same synapse_model.
# ! - Connections with positive weight are assigned model exc, those with
# ! negative weight model inh.
# ! - When computing totals, exc has weight +1, inh weight -1
# ! - Exc is colored blue, inh red.
# !
# ! AMPA/NMDA/GABA_A/GABA_B
# ! - Used if the set of ``synapse_model`` s in the network is a subset of
# ! those four types.
# ! - AMPA/NMDA carry weight +1, GABA_A/GABA_B weight -1.
# ! - Colors are as follows:
# !
# ! :AMPA: blue
# ! :NMDA: green
# ! :GABA_A: red
# ! :GABA_B: magenta
# !
# !
# ! We saw a first example of user-defined synapse types in the non-Dale
# ! example above. In that case, we only changed the grouping. Here, I will
# ! demonstrate the effect of different ordering, weighting, and color
# ! specifications. We use the complex model from above as example.
# !
# ! *NOTE*: It is most likely a *bad idea* to change the colors or placement of
# ! synapse types. If everyone uses the same design rules, we will all be able
# ! to read each other's figures much more easily.
# ! Placement of synapse types
# ! ::::::::::::::::::::::::::
# !
# ! The ``synTypes`` nested tuple defines the placement of patches for
# ! different synapse models. Default layout is
# !
# ! ====== ======
# ! AMPA NMDA
# ! GABA_A GABA_B
# ! ====== ======
# !
# ! All four matrix elements are shown in this layout only when using
# ! ``mode='layer'`` display. Otherwise, one or the other row is shown.
# ! Note that synapses that can arise from a layer simultaneously must
# ! always be placed on one matrix row, i.e., in one group. As an example,
# ! we now invert placement, without any other changes:
cinv_syns = ((cpl.SynType('GABA_B', -1, 'm'), cpl.SynType('GABA_A', -1, 'r')),
(cpl.SynType('NMDA', 1, 'g'), cpl.SynType('AMPA', 1, 'b')))
cinv_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cinv_syns)
cinv_cp.plot()
pylab.show()
# ! Notice that on each row the synapses are exchanged compared to the original
# ! figure above. When displaying by layer, the rows have also traded places:
cinv_cp.plot(aggrGroups=True)
pylab.show()
# ! Totals are not affected:
cinv_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! Weighting of synapse types in ``totals`` mode
# ! :::::::::::::::::::::::::::::::::::::::::::::
# !
# ! Different synapses may have quite different efficacies, so weighting them
# ! all with +-1 when computing totals may give a wrong impression. Different
# ! weights can be supplied as the second argument to SynType(). We return to the
# ! normal placement of synapses and
# ! create two examples with very different weights:
cw1_syns = ((cpl.SynType('AMPA', 10, 'b'), cpl.SynType('NMDA', 1, 'g')),
(cpl.SynType('GABA_A', -2, 'g'), cpl.SynType('GABA_B', -10, 'b')))
cw1_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw1_syns)
cw2_syns = ((cpl.SynType('AMPA', 1, 'b'), cpl.SynType('NMDA', 10, 'g')),
(cpl.SynType('GABA_A', -20, 'g'), cpl.SynType('GABA_B', -1, 'b')))
cw2_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cw2_syns)
# ! We first plot them both in population mode
cw1_cp.plot(aggrSyns=True)
pylab.show()
cw2_cp.plot(aggrSyns=True)
pylab.show()
# ! Finally, we plot them aggregating across groups and synapse models
cw1_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
cw2_cp.plot(aggrGroups=True, aggrSyns=True)
pylab.show()
# ! Alternative colors for synapse patches
# ! ::::::::::::::::::::::::::::::::::::::
# ! Different colors can be specified using any legal color specification.
# ! Colors should be saturated, as they will be mixed with white. You may
# ! also provide a colormap explicitly. For this example, we once more use the
# ! normal placement and weights. As all synapse types are shown in layer
# ! mode, we use that mode for display here.
cc_syns = (
(cpl.SynType('AMPA', 1, 'maroon'), cpl.SynType('NMDA', 1, (0.9, 0.5, 0))),
(cpl.SynType('GABA_A', -1, '0.7'), cpl.SynType('GABA_B', 1, pylab.cm.hsv)))
cc_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=cc_syns)
cc_cp.plot(aggrGroups=True)
pylab.show()
# ! We get the following colors:
# !
# ! AMPA brownish
# ! NMDA golden orange
# !   GABA_A   grey
# !   GABA_B   hsv colormap
# !
# ! **NB:** When passing an explicit colormap, parts outside the mask will be
# ! shown in the "bad" color of the colormap, usually the "bottom" color in the
# ! map. To let points outside the mask appear in white, set the bad color of
# ! the colormap; unfortunately, this modifies the colormap.
pylab.cm.hsv.set_bad(cpl.colormaps.bad_color)
ccb_syns = (
(cpl.SynType('AMPA', 1, 'maroon'),
cpl.SynType('NMDA', 1, (0.9, 0.5, 0.1))),
(cpl.SynType('GABA_A', -1, '0.7'),
cpl.SynType('GABA_B', 1, pylab.cm.hsv)))
ccb_cp = cpl.ConnectionPattern(c_layer, c_conn, synTypes=ccb_syns)
ccb_cp.plot(aggrGroups=True)
pylab.show()
# ! Other configuration options
# ! ---------------------------
# !
# ! Some more adjustments are possible by setting certain module properties.
# ! Some of these need to be set before ConnectionPattern() is constructed.
# !
# ! Background color for masked parts of each patch
cpl.colormaps.bad_color = 'cyan'
# ! Background for layers
cpl.plotParams.layer_bg = (0.8, 0.8, 0.0)
# ! Resolution for patch computation
cpl.plotParams.n_kern = 5
# ! Physical size of patches: longest edge of largest patch, in mm
cpl.plotParams.patch_size = 40
# ! Margins around the figure (excluding labels)
cpl.plotParams.margins.left = 40
cpl.plotParams.margins.top = 30
cpl.plotParams.margins.bottom = 15
cpl.plotParams.margins.right = 30
# ! Fonts for layer and population labels
import matplotlib.font_manager as fmgr
cpl.plotParams.layer_font = fmgr.FontProperties(family='serif', weight='bold',
size='xx-large')
cpl.plotParams.pop_font = fmgr.FontProperties('small')
# ! Orientation for layer and population label
cpl.plotParams.layer_orientation = {'sender': 'vertical', 'target': 60}
cpl.plotParams.pop_orientation = {'sender': 'horizontal', 'target': -45}
# ! Font for legend titles and ticks, tick placement, and tick format
cpl.plotParams.legend_title_font = fmgr.FontProperties(family='serif',
weight='bold',
size='large')
cpl.plotParams.legend_tick_font = fmgr.FontProperties(family='sans-serif',
weight='light',
size='xx-small')
cpl.plotParams.legend_ticks = [0, 1, 2]
cpl.plotParams.legend_tick_format = '%.1f pA'
cx_cp = cpl.ConnectionPattern(c_layer, c_conn)
cx_cp.plot(colorLimits=[0, 2])
pylab.show()
# ! Several more options are available to control the format of the color bars
# ! (they all are members of plotParams):
# ! * legend_location : if 'top', place synapse name atop color bar
# ! * margins.colbar : height of lower margin set aside for color bar, in mm
# ! * cbheight : height of single color bar relative to margins.colbar
# ! * cbwidth : width of single color bar relative to figure width
# ! * cbspace : spacing between color bars, relative to figure width
# ! * cboffset : offset of first color bar from left margin, relative to
# ! figure width
# ! You can also specify the width of the final figure, but this may not work
# ! well with on-screen display or here in pyreport. Width is in mm.
# ! Note that left and right margin combined are 70mm wide, so only 50mm are
# ! left for the actual CPT.
cx_cp.plot(fixedWidth=120)
pylab.show()
# ! If not using pyreport, we finally show and block
if not using_pyreport:
print("")
print("The connplotter_tutorial script is done. " +
"Call pylab.show() and enjoy the figures!")
print(
"You may need to close all figures manually " +
"to get the Python prompt back.")
print("")
pylab.show = pylab_show
| gpl-2.0 |
CtraliePubs/SOCGMM2016_SlidingWindowVideo | JumpingJacks/plotPixel.py | 1 | 1320 | import numpy as np
import matplotlib.pyplot as plt
import scipy.misc
import sys
sys.path.append("../")
sys.path.append("../S3DGLPy")
from VideoTools import *
from PCAGL import *
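# Track the mean RGB value of a 10x10 pixel patch (centred on `loc` below) in
# every frame of the jumping-jacks video, mark the patch in green, and save one
# annotated figure per frame.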
if __name__ == '__main__':
(Vid, IDims) = loadCVVideo('jumpingjackscropped.avi')
N = Vid.shape[1]
loc = [70, 323]
vals = np.zeros((N, 3))
plt.figure(figsize=(12, 6))
for i in range(N):
plt.clf()
f = np.reshape(Vid[:, i], IDims)
f = f[:, :, [2, 1, 0]] #CV stores as BGR
f = f*255
thisvals = np.zeros((100, 3))
idx = 0
for ii in range(-5, 5):
for jj in range(-5, 5):
thisvals[idx, :] = np.array(f[loc[0]+ii, loc[1]+jj, :])
f[loc[0]+ii, loc[1]+jj, :] = [0, 255.0, 0]
idx += 1
thisvals = np.mean(thisvals, 0)
vals[i, :] = thisvals.flatten()
#scipy.misc.imsave("VideoStack%i.png"%i, I)
plt.subplot(121)
plt.imshow(f/255.0)
plt.axis('off')
plt.subplot(122)
plt.hold(True)
plt.plot(vals[:, 0], 'r')
plt.plot(vals[:, 1], 'g')
plt.plot(vals[:, 2], 'b')
plt.ylim([0, 255])
plt.xlabel('Frame Number')
plt.ylabel('RGB Value')
plt.savefig("Pixel%i.png"%i, dpi=150, bbox_inches = 'tight')
| apache-2.0 |
zhenv5/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
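    # accumulate each tree's partial dependence (scaled by the learning rate)
    # into pdp; one row per tree-per-stage, i.e. per class for multi-class GBRT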
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
bbci/mushu | test/test_triggerdelay.py | 3 | 3724 | #!/usr/bin/env python
# test_triggerdelay.py
# Copyright (C) 2013 Bastian Venthur
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import division
import unittest
import json
import socket
import time
import math
import libmushu
from libmushu.driver.randomamp import RandomAmp
from libmushu.amplifier import Amplifier
import logging
import numpy as np
from matplotlib import pyplot as plt
logging.basicConfig(format='%(relativeCreated)10.0f %(processName)-11s %(threadName)-10s %(name)-10s %(levelname)8s %(message)s', level=logging.NOTSET)
logger = logging.getLogger(__name__)
logger.info('Logger started')
class TriggerTestAmp(Amplifier):
"""TriggerTestAmp.
This amp sends marker just before and after its blocking sleep, along with
the current timestamp as payload. On the receiving side one can use the
payload to calculate the delay between sending and receiving triggers via
TCP/IP.
"""
def __init__(self):
self.channels = 100
self.fs = 100
self.last_sample = time.time()
def start(self):
self._marker_count = 0
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('localhost', 12344))
@property
def sample_len(self):
return 1. / self.fs
@property
def elapsed(self):
return time.time() - self.last_sample
def get_data(self):
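        # send a wall-clock timestamp marker right before and right after the
        # blocking wait; the receiving side compares it with its own clock to
        # estimate the marker delay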
self.s.sendall("%f\n" % time.time())
# simulate blocking until we have enough data
elapsed = self.elapsed
if elapsed < self.sample_len:
time.sleep(self.sample_len - elapsed)
self._marker_count += 1
self.s.sendall("%f\n" % time.time())
dt = self.elapsed
samples = math.floor(self.fs * dt)
data = np.random.randint(0, 1024, (samples, self.channels))
self.last_sample = time.time()
return data, [[samples-1, self._marker_count]]
def configure(self, fs):
self.fs = fs
class TestTriggerDelay(unittest.TestCase):
"""Test the trigger delay."""
def test_triggerdelay(self):
"""Mean and max delay must be reasonably small."""
for i in 10, 100, 1000, 10000:
logger.debug('Setting FS to {fs}kHz'.format(fs=(i / 1000)))
amp = libmushu.AmpDecorator(TriggerTestAmp)
amp._debug_tcp_marker_timestamps = True
amp.configure(fs=i)
amp.start()
delays = []
t_start = time.time()
while time.time() < t_start + 1:
data, marker = amp.get_data()
for timestamp, m in marker:
delta_t = (timestamp - float(m)) * 1000
delays.append(delta_t)
amp.stop()
delays = np.array(delays)
logger.debug("Min: %.2f, Max: %.2f, Mean: %.2f, Std: %.2f" % (delays.min(), delays.max(), delays.mean(), delays.std()))
self.assertLessEqual(delays.mean(), 1)
self.assertLessEqual(delays.max(), 10)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
automl/paramsklearn | tests/components/feature_preprocessing/test_liblinear.py | 1 | 2085 | import unittest
from sklearn.linear_model import RidgeClassifier
from ParamSklearn.components.feature_preprocessing.liblinear_svc_preprocessor import \
LibLinear_Preprocessor
from ParamSklearn.util import _test_preprocessing, PreprocessingTestCase, \
get_dataset
import sklearn.metrics
class LiblinearComponentTest(PreprocessingTestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(LibLinear_Preprocessor)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertFalse((transformation == 0).all())
def test_default_configuration_classify(self):
for i in range(2):
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits',
make_sparse=False)
configuration_space = LibLinear_Preprocessor.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = LibLinear_Preprocessor(random_state=1,
**{hp_name: default[hp_name]
for hp_name in
default if default[
hp_name] is not None})
preprocessor.fit(X_train, Y_train)
X_train_trans = preprocessor.transform(X_train)
X_test_trans = preprocessor.transform(X_test)
# fit a classifier on top
classifier = RidgeClassifier()
predictor = classifier.fit(X_train_trans, Y_train)
predictions = predictor.predict(X_test_trans)
accuracy = sklearn.metrics.accuracy_score(predictions, Y_test)
self.assertAlmostEqual(accuracy, 0.87917425622343659, places=2)
def test_preprocessing_dtype(self):
super(LiblinearComponentTest,
self)._test_preprocessing_dtype(LibLinear_Preprocessor,
test_sparse=False)
| bsd-3-clause |
0x0all/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation against SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
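# time scikit-learn's Ward (AgglomerativeClustering) and SciPy's hierarchy.ward
# on the same random data for every (n_samples, n_features) combination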
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
dsilvestro/PyRate | experimental_code/plot_BDNN.py | 1 | 13787 | import numpy as np
np.set_printoptions(suppress= 1, precision=3)
import os, csv, sys
import pandas as pd
def softPlus(z):
return np.log(np.exp(z) + 1)
def get_rate_BDNN(rate, x, w):
# n: n species, j: traits, i: nodes
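    # x has shape (n, j); w[0] has shape (i, j) (input-to-hidden weights) and
    # w[1] has shape (i,) (hidden-to-output weights). The hidden layer uses a
    # ReLU activation, and the output rescales the baseline rate by exp(z).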
z = np.einsum('nj,ij->ni', x, w[0])
z[z < 0] = 0
z = np.einsum('ni,i->n', z, w[1])
rates = np.exp(z) * rate
return rates
def get_posterior_weigths(logfile, n_traits, burnin):
head = np.array(next(open(logfile)).split())
w_lam_0_indx = [i for i in range(len(head)) if 'w_lam_0' in head[i]]
w_lam_1_indx = [i for i in range(len(head)) if 'w_lam_1' in head[i]]
rate_l_0_indx = [i for i in range(len(head)) if head[i] == 'lambda_0']
w_mu_0_indx = [i for i in range(len(head)) if 'w_mu_0' in head[i]]
w_mu_1_indx = [i for i in range(len(head)) if 'w_mu_1' in head[i]]
rate_m_0_indx = [i for i in range(len(head)) if head[i] == 'mu_0']
post_tbl = np.loadtxt(logfile, skiprows=1)
post_tbl = post_tbl[int(burnin*post_tbl.shape[0]):,:]
nodes = int(len(w_lam_0_indx)/n_traits)
w_lam_list = []
rate_l = post_tbl[:,rate_l_0_indx]
for i in range(post_tbl.shape[0]):
w_lam_0 = post_tbl[i, w_lam_0_indx].reshape((nodes,n_traits))
w_lam_1 = post_tbl[i, w_lam_1_indx]
w_lam_list.append([w_lam_0,w_lam_1])
w_mu_list = []
rate_m = post_tbl[:,rate_m_0_indx]
for i in range(post_tbl.shape[0]):
w_mu_0 = post_tbl[i, w_mu_0_indx].reshape((nodes,n_traits))
w_mu_1 = post_tbl[i, w_mu_1_indx]
w_mu_list.append([w_mu_0,w_mu_1])
return rate_l, rate_m, w_lam_list, w_mu_list
def get_posterior_weigths_BDS(logfile, n_traits, burnin):
head = np.array(next(open(logfile)).split())
tot_length_indx = [i for i in range(len(head)) if head[i] == 'tot_length'][0]
w_lam_0_indx = [i for i in range(len(head)) if 'w_lam_0' in head[i]]
w_lam_1_indx = [i for i in range(len(head)) if 'w_lam_1' in head[i]]
rate_l_0_indx = [i for i in range(len(head)) if 'lambda_' in head[i] and i < tot_length_indx]
w_mu_0_indx = [i for i in range(len(head)) if 'w_mu_0' in head[i]]
w_mu_1_indx = [i for i in range(len(head)) if 'w_mu_1' in head[i]]
rate_m_0_indx = [i for i in range(len(head)) if 'mu_' in head[i] and i < tot_length_indx]
post_tbl = np.loadtxt(logfile, skiprows=1)
post_tbl = post_tbl[int(burnin*post_tbl.shape[0]):,:]
nodes = int(len(w_lam_0_indx)/n_traits)
w_lam_list = []
for i in range(post_tbl.shape[0]):
w_lam_0 = post_tbl[i, w_lam_0_indx].reshape((nodes,n_traits))
w_lam_1 = post_tbl[i, w_lam_1_indx]
w_lam_list.append([w_lam_0,w_lam_1])
w_mu_list = []
rate_m = post_tbl[:,rate_m_0_indx]
for i in range(post_tbl.shape[0]):
w_mu_0 = post_tbl[i, w_mu_0_indx].reshape((nodes,n_traits))
w_mu_1 = post_tbl[i, w_mu_1_indx]
w_mu_list.append([w_mu_0,w_mu_1])
rate_l_list = post_tbl[:,rate_l_0_indx]
rate_m_list = post_tbl[:,rate_m_0_indx]
return rate_l_list, rate_m_list, w_lam_list, w_mu_list
def get_file_name(s):
input_file_raw = os.path.basename(s)
input_file = os.path.splitext(input_file_raw)[0] # file name without extension
return input_file
def get_posterior_rates(logfile,
trait_file,
time_range = np.arange(15),
rescale_time = 0.015,
burnin = 0.25):
traits_raw = np.genfromtxt(trait_file,skip_header=1, dtype=str)
traits = traits_raw[:,1:].astype(float)
n_traits = traits.shape[1]
# use time as a feature
n_traits += 1
a = np.min(traits, axis=0)
b = np.max(traits, axis=0)
c = np.median(traits, axis=0)
trait_select = np.array([
[a[0],c[1],0], # low Lat Woody
[c[0],c[1],0], # mid Lat Woody
[b[0],c[1],0], # high Lat Woody
[a[0],c[1],1],
[c[0],c[1],1],
[b[0],c[1],1]
])
rate_l, rate_m, w_lam, w_mu = get_posterior_weigths(logfile, n_traits, burnin)
if len(time_range):
rescaled_time = rescale_time*time_range
for i in range(len(rescaled_time)):
time_i = rescaled_time[i]
trait_tbl_i = 0+np.hstack((trait_select,time_i * np.ones((trait_select.shape[0],1))))
lam_matrix = np.zeros((len(rate_l),trait_select.shape[0]))
mu_matrix = np.zeros((len(rate_l),trait_select.shape[0]))
for j in range(len(rate_l)):
vec_lam_i = get_rate_BDNN(rate_l[j], trait_tbl_i, w_lam[j])
vec_mu_i = get_rate_BDNN(rate_m[j], trait_tbl_i, w_mu[j])
lam_matrix[j,:] = vec_lam_i
mu_matrix[j,:] = vec_mu_i
print("\ntime", time_i/rescale_time)
print("lambda:",np.mean(lam_matrix, axis=0))
print("mu:",np.mean(mu_matrix, axis=0))
def predicted_rates(logfile,
trait_file,
time_range = np.arange(15),
rescale_time = 0.015,
burnin = 0.25,
fixShift = [np.inf,56.0,33.9,23.03,5.333,2.58,0],
time_as_trait = True):
traits = np.loadtxt(trait_file, skiprows=1)
n_traits = traits.shape[1]
# use time as a feature
if time_as_trait:
n_traits += 1
rate_l2D, rate_m2D, w_lam, w_mu = get_posterior_weigths_BDS(logfile, n_traits, burnin)
rescaled_time = rescale_time*time_range
for i in range(len(rescaled_time)):
time_i = rescaled_time[i]
if time_as_trait:
trait_tbl_i = 0+np.hstack((traits,time_i * np.ones((traits.shape[0],1))))
else:
trait_tbl_i = 0+traits
rate_l = rate_l2D[:, np.digitize(time_range[i], fixShift)-1]
rate_m = rate_m2D[:, np.digitize(time_range[i], fixShift)-1]
# print(np.mean(rate_l), np.mean(rate_m))
lam_matrix = np.zeros((len(rate_l),traits.shape[0]))
mu_matrix = np.zeros((len(rate_l),traits.shape[0]))
for j in range(len(rate_l)):
vec_lam_i = get_rate_BDNN(rate_l[j], trait_tbl_i, w_lam[j])
vec_mu_i = get_rate_BDNN(rate_m[j], trait_tbl_i, w_mu[j])
lam_matrix[j,:] = vec_lam_i
mu_matrix[j,:] = vec_mu_i
# get harmonic mean of rates
# print(lam_matrix.shape)
lam_matrix_hm = np.mean(lam_matrix,axis=0) #len(rate_l) / np.sum(1/lam_matrix,axis=0)
mu_matrix_hm = np.mean(mu_matrix,axis=0) #len(rate_l) / np.sum(1/mu_matrix,axis=0)
print(time_range[i], "MAX",np.max(lam_matrix_hm), np.median(mu_matrix_hm), lam_matrix_hm.shape)
rates_predicted = np.array([lam_matrix_hm, mu_matrix_hm]).T
out_file_l = get_file_name(trait_file) + "_t%s_NN%s.txt" % (time_range[i], w_lam[0][0].shape[0])
np.savetxt(os.path.join(os.path.dirname(trait_file), out_file_l),rates_predicted, delimiter="\t")
# out_file_m = get_file_name(trait_file) + "_mu_NN%s.txt" % w_lam[0][0].shape[0]
# np.savetxt(os.path.join(os.path.dirname(trait_file), out_file_m),lam_matrix_hm, delimiter="\t")
def get_tste_from_logfile(f, burnin=0):
head = next(open(f)).split()
t_file=np.loadtxt(f, skiprows=1)
w=[x for x in head if 'TS' in x]
#w=[x for x in head if 'ts_' in x]
ind_ts0 = head.index(w[0])
y=[x for x in head if 'TE' in x]
#y=[x for x in head if 'te_' in x]
ind_te0 = head.index(y[0])
print(len(w), "species", t_file.shape)
j=0
out_list=list()
if burnin<1: burnin = int(burnin*t_file.shape[0])
out_list = []
for i in np.arange(ind_ts0,ind_te0):
meanTS= np.mean(t_file[burnin:t_file.shape[0],i])
meanTE= np.mean(t_file[burnin:t_file.shape[0],ind_te0+j])
out_list.append([meanTS, meanTE])
j += 1
return out_list
def predicted_rates_per_species(logfile,
species_trait_file=None,
trait_tbl=None, # expects pandas data frame with header and
# 1st column named: 'Taxon_name'
wd="",
time_range = np.arange(15),
rescale_time = 0.015,
burnin = 0.25,
fixShift = [np.inf,56.0,33.9,23.03,5.333,2.58,0],
time_as_trait = True,
return_post_sample=False):
if species_trait_file:
species_traits = pd.read_csv(species_trait_file, delimiter="\t")
traits = species_traits.iloc[:,1:]
elif trait_tbl is not None:
species_traits = trait_tbl
traits = species_traits.iloc[:,1:]
else:
sys.exit("No traits found")
n_traits = traits.shape[1]
# use time as a feature
if time_as_trait:
n_traits += 1
rate_l2D, rate_m2D, w_lam, w_mu = get_posterior_weigths_BDS(logfile, n_traits, burnin)
rescaled_time = rescale_time*time_range
species_rate_lam = []
species_rate_mu = []
species_rate_div = []
rate_samples = list()
for i in range(len(rescaled_time)):
time_i = rescaled_time[i]
if time_as_trait:
trait_tbl_i = 0+np.hstack((traits,time_i * np.ones((traits.shape[0],1))))
else:
trait_tbl_i = 0+traits
rate_l = rate_l2D[:, np.digitize(time_range[i], fixShift)-1]
rate_m = rate_m2D[:, np.digitize(time_range[i], fixShift)-1]
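        # np.digitize picks the time bin (delimited by the fixShift boundaries)
        # that contains the current time point, so the matching posterior
        # baseline rates are used for this time slice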
# print(np.mean(rate_l), np.mean(rate_m))
lam_matrix = np.zeros((len(rate_l),traits.shape[0]))
mu_matrix = np.zeros((len(rate_l),traits.shape[0]))
for j in range(len(rate_l)):
vec_lam_i = get_rate_BDNN(rate_l[j], trait_tbl_i, w_lam[j])
vec_mu_i = get_rate_BDNN(rate_m[j], trait_tbl_i, w_mu[j])
lam_matrix[j,:] = vec_lam_i
mu_matrix[j,:] = vec_mu_i
# get harmonic mean of rates
# print(lam_matrix.shape)
lam_matrix_hm = np.mean(lam_matrix,axis=0) #len(rate_l) / np.sum(1/lam_matrix,axis=0)
mu_matrix_hm = np.mean(mu_matrix,axis=0) #len(rate_l) / np.sum(1/mu_matrix,axis=0)
print(time_range[i], "MAX",np.max(lam_matrix_hm), np.median(mu_matrix_hm), lam_matrix_hm.shape)
net_div = lam_matrix - mu_matrix
div_matrix_hm = np.mean(net_div,axis=0)
if return_post_sample:
res = [lam_matrix, mu_matrix]
rate_samples.append(res)
species_rate_lam.append(lam_matrix_hm)
species_rate_mu.append(mu_matrix_hm)
species_rate_div.append(div_matrix_hm)
species_rate_lam = np.array(species_rate_lam).T
species_rate_mu = np.array(species_rate_mu).T
species_rate_div = np.array(species_rate_div).T
list_tste = get_tste_from_logfile(logfile, burnin)
with open(os.path.join(wd, "taxon_speciation_rates.txt"), 'w') as f:
writer = csv.writer(f, delimiter='\t')
l = ["Species","ts","te"]
h = ["%s_Ma" % time_range[i] for i in range(len(rescaled_time))]
writer.writerow(l+h)
for i in range(len(species_rate_lam)):
l = [species_traits["Taxon_name"][i]] + list_tste[i] + list(species_rate_lam[i])
writer.writerow(l)
with open(os.path.join(wd, "taxon_extinction_rates.txt"), 'w') as f:
writer = csv.writer(f, delimiter='\t')
l = ["Species","ts","te"]
h = ["%s_Ma" % time_range[i] for i in range(len(rescaled_time))]
writer.writerow(l+h)
for i in range(len(species_rate_mu)):
l = [species_traits["Taxon_name"][i]] + list_tste[i] + list(species_rate_mu[i])
writer.writerow(l)
with open(os.path.join(wd, "taxon_diversification_rates.txt"), 'w') as f:
writer = csv.writer(f, delimiter='\t')
l = ["Species","ts","te"]
h = ["%s_Ma" % time_range[i] for i in range(len(rescaled_time))]
writer.writerow(l+h)
for i in range(len(species_rate_div)):
l = [species_traits["Taxon_name"][i]] + list_tste[i] + list(species_rate_div[i])
writer.writerow(l)
if return_post_sample:
return np.array(rate_samples)
if __name__ == '__main__':
species_trait_file= 'NTemFloraTraits.txt'
logfile = 'NTemFlora_1_BDS_BDNN7T_mcmc.log'
wd = 'logs/'
time_range = np.arange(15)
rescale_time = 0.015
burnin = 0.75
time_as_trait = True
fixShift = [np.inf,56.0,47.8,41.2,37.8,33.9,28.1,23.03,20.44,15.97,13.82,11.63,7.246,5.333,3.6,2.58]
    predicted_rates_per_species(logfile,
                                species_trait_file,
                                wd=wd,
                                time_range = np.arange(0, 65, 1),
                                rescale_time = 0.015,
                                burnin = 0.75,
                                fixShift = [np.inf,56.0,33.9,23.03,5.333,2.58,0],
                                time_as_trait = True)
| agpl-3.0 |
fabianp/scikit-learn | sklearn/feature_selection/tests/test_base.py | 170 | 3666 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
dsavransky/plandb.sioslab.com | getDataFromIPAC_composite.py | 1 | 44449 | import requests
import pandas
from StringIO import StringIO
import astropy.units as u
import astropy.constants as const
import EXOSIMS.PlanetPhysicalModel.Forecaster
from sqlalchemy import create_engine
import getpass,keyring
import numpy as np
import os
from scipy.interpolate import interp1d, interp2d, RectBivariateSpline
import sqlalchemy.types
import re
import scipy.integrate
import scipy.interpolate as interpolate
from EXOSIMS.util.eccanom import eccanom
from EXOSIMS.util.deltaMag import deltaMag
import EXOSIMS.Prototypes.PlanetPhysicalModel
from astropy.time import Time
from getDataFromIPAC_extended import substitute_data
%pylab --no-import-all
t0 = Time('2026-01-01T00:00:00', format='isot', scale='utc')
#grab the data
query = """https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=compositepars&select=*&format=csv"""
r = requests.get(query)
data = pandas.read_csv(StringIO(r.content))
query2 = """https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&select=*&format=csv"""
r2 = requests.get(query2)
data2 = pandas.read_csv(StringIO(r2.content))
#strip leading 'f' on data colnames
colmap = {k:k[1:] if (k.startswith('fst_') | k.startswith('fpl_')) else k for k in data.keys()}
data = data.rename(columns=colmap)
#sma, eccen, metallicity cols were renamed so name them back for merge
data = data.rename(columns={'pl_smax':'pl_orbsmax',
'pl_smaxerr1':'pl_orbsmaxerr1',
'pl_smaxerr2':'pl_orbsmaxerr2',
'pl_smaxlim':'pl_orbsmaxlim',
'pl_smaxreflink':'pl_orbsmaxreflink',
'pl_eccen':'pl_orbeccen',
'pl_eccenerr1':'pl_orbeccenerr1',
'pl_eccenerr2':'pl_orbeccenerr2',
'pl_eccenlim':'pl_orbeccenlim',
'pl_eccenreflink':'pl_orbeccenreflink',
'st_met':'st_metfe',
'st_meterr1':'st_metfeerr1',
'st_meterr2':'st_metfeerr2',
'st_metreflink':'st_metfereflink',
'st_metlim':'st_metfelim',
})
#sort by planet name
data = data.sort_values(by=['pl_name']).reset_index(drop=True)
data2 = data2.sort_values(by=['pl_name']).reset_index(drop=True)
#merge data sets
data = data.combine_first(data2)
# substitute data from the extended table.
data = substitute_data(data)
#sort by planet name
data = data.sort_values(by=['pl_name']).reset_index(drop=True)
###############################
#some sanity checking
# data3 = data.combine_first(data2)
#
# ccols = np.array(list(set(data.keys()) & set(data2.keys())))
# ncols = np.array(list(set(data2.keys()) - set(data.keys())))
#
# #compare redundant cols
# diffcs = []
# diffinds = []
# for c in ccols:
# tmp = (data[c].values == data2[c].values) | (data[c].isnull().values & data2[c].isnull().values)
# if not np.all( tmp ):
# diffcs.append(c)
# diffinds.append(np.where(~tmp)[0])
#
# for c,inds in zip(diffcs,diffinds):
# print c
# tmp = data[c][inds].isnull().values & ~(data2[c][inds].isnull().values)
# assert np.all(data3[c][inds][tmp] == data2[c][inds][tmp])
###############################
## filter rows:
# we need:
# distance AND
# (sma OR (period AND stellar mass)) AND
# (radius OR mass (either true or m\sin(i)))
keep = ~np.isnan(data['st_dist'].values) & (~np.isnan(data['pl_orbsmax'].values) | \
(~np.isnan(data['pl_orbper'].values) & ~np.isnan(data['st_mass'].values))) & \
(~np.isnan(data['pl_bmassj'].values) | ~np.isnan(data['pl_radj'].values))
data = data[keep]
data = data.reset_index(drop=True)
##fill in missing smas from period & star mass
nosma = np.isnan(data['pl_orbsmax'].values)
p2sma = lambda mu,T: ((mu*T**2/(4*np.pi**2))**(1/3.)).to('AU')
GMs = const.G*(data['st_mass'][nosma].values*u.solMass) # gravitational parameter (stellar mass given in solar masses)
T = data['pl_orbper'][nosma].values*u.day
tmpsma = p2sma(GMs,T)
data['pl_orbsmax'][nosma] = tmpsma
data['pl_orbsmaxreflink'][nosma] = "Calculated from stellar mass and orbital period."
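#quick sanity check of the Kepler's-third-law helper above (illustrative values only):
#p2sma(const.G*(1*u.solMass), 365.25*u.day) should come out very close to 1 AU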
##update all WAs based on sma
WA = np.arctan((data['pl_orbsmax'].values*u.AU)/(data['st_dist'].values*u.pc)).to('mas')
data['pl_angsep'] = WA.value
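#sanity check (illustrative): a 1 AU orbit viewed from 10 pc subtends roughly 100 mas,
#e.g. np.arctan((1*u.AU)/(10*u.pc)).to('mas')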
###################################################################
#devel (skip)
#forecaster original
#S = np.array([0.2790,0.589,-0.044,0.881]) #orig coeffs
#C0 = np.log10(1.008)
#T = np.array([2.04,((0.414*u.M_jupiter).to(u.M_earth)).value,((0.0800*u.M_sun).to(u.M_earth)).value])
#C = np.hstack((C0, C0 + np.cumsum(-np.diff(S)*np.log10(T))))
#modify neptune and jupiter leg with new transition point at saturn mass and then flat leg past jupiter mass
S = np.array([0.2790,0,0,0,0.881])
C = np.array([np.log10(1.008), 0, 0, 0, 0])
T = np.array([2.04,95.16,(u.M_jupiter).to(u.M_earth),((0.0800*u.M_sun).to(u.M_earth)).value])
Rj = u.R_jupiter.to(u.R_earth)
Rs = 8.522 #saturn radius
S[1] = (np.log10(Rs) - (C[0] + np.log10(T[0])*S[0]))/(np.log10(T[1]) - np.log10(T[0]))
C[1] = np.log10(Rs) - np.log10(T[1])*S[1]
S[2] = (np.log10(Rj) - np.log10(Rs))/(np.log10(T[2]) - np.log10(T[1]))
C[2] = np.log10(Rj) - np.log10(T[2])*S[2]
C[3] = np.log10(Rj)
C[4] = np.log10(Rj) - np.log10(T[3])*S[4]
##forecaster sanity check:
m1 = np.array([1e-3,T[0]])
r1 = 10.**(C[0] + np.log10(m1)*S[0])
m2 = T[0:2]
r2 = 10.**(C[1] + np.log10(m2)*S[1])
m3 = T[1:3]
r3 = 10.**(C[2] + np.log10(m3)*S[2])
m4 = T[2:4]
r4 = 10.**(C[3] + np.log10(m4)*S[3])
m5 = np.array([T[3],1e6])
r5 = 10.**(C[4] + np.log10(m5)*S[4])
plt.figure()
plt.loglog(m1,r1)
plt.loglog(m2,r2)
plt.loglog(m3,r3)
plt.loglog(m4,r4)
plt.loglog(m5,r5)
plt.xlabel('Mass ($M_\oplus$)')
plt.ylabel('Radius ($R_\oplus$)')
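#note: m and Rf are only defined further below (mass values and Fortney radii); this overlay
#assumes those sections have already been run when working through the script interactively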
plt.loglog(m,Rf,'.',zorder=0)
##################################################################
#drop all other radius columns
data = data.drop(columns=['pl_rade', 'pl_radelim', 'pl_radserr2', 'pl_radeerr1', 'pl_rads', 'pl_radslim', 'pl_radeerr2', 'pl_radserr1'])
#fill in radius based on mass
noR = ((data['pl_radreflink'] == '<a refstr="CALCULATED VALUE" href="/docs/composite_calc.html" target=_blank>Calculated Value</a>') |\
data['pl_radj'].isnull()).values
m = ((data['pl_bmassj'][noR].values*u.M_jupiter).to(u.M_earth)).value
def RfromM(m):
m = np.array(m,ndmin=1)
R = np.zeros(m.shape)
S = np.array([0.2790,0,0,0,0.881])
C = np.array([np.log10(1.008), 0, 0, 0, 0])
T = np.array([2.04,95.16,(u.M_jupiter).to(u.M_earth),((0.0800*u.M_sun).to(u.M_earth)).value])
Rj = u.R_jupiter.to(u.R_earth)
Rs = 8.522 #saturn radius
S[1] = (np.log10(Rs) - (C[0] + np.log10(T[0])*S[0]))/(np.log10(T[1]) - np.log10(T[0]))
C[1] = np.log10(Rs) - np.log10(T[1])*S[1]
S[2] = (np.log10(Rj) - np.log10(Rs))/(np.log10(T[2]) - np.log10(T[1]))
C[2] = np.log10(Rj) - np.log10(T[2])*S[2]
C[3] = np.log10(Rj)
C[4] = np.log10(Rj) - np.log10(T[3])*S[4]
inds = np.digitize(m,np.hstack((0,T,np.inf)))
for j in range(1,inds.max()+1):
R[inds == j] = 10.**(C[j-1] + np.log10(m[inds == j])*S[j-1])
return R
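#hedged usage sketch of the piecewise fit above (expected outputs are approximate):
#RfromM([1.0, 317.8, 5000.0]) should return roughly [1.0, 11.2, 11.2] Earth radii, since the
#relation is flat at one Jupiter radius for masses above one Jupiter mass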
R = RfromM(m)
#create mod forecaster radius column
data = data.assign(pl_radj_forecastermod=data['pl_radj'].values)
data['pl_radj_forecastermod'][noR] = ((R*u.R_earth).to(u.R_jupiter)).value
## now the Fortney model
from EXOSIMS.PlanetPhysicalModel.FortneyMarleyCahoyMix1 import FortneyMarleyCahoyMix1
fortney = FortneyMarleyCahoyMix1()
ml10 = m <= 17
Rf = np.zeros(m.shape)
Rf[ml10] = fortney.R_ri(0.67,m[ml10])
mg10 = m > 17
tmpsmas = data['pl_orbsmax'][noR].values
tmpsmas = tmpsmas[mg10]
tmpsmas[tmpsmas < fortney.giant_pts2[:,1].min()] = fortney.giant_pts2[:,1].min()
tmpsmas[tmpsmas > fortney.giant_pts2[:,1].max()] = fortney.giant_pts2[:,1].max()
tmpmass = m[mg10]
tmpmass[tmpmass > fortney.giant_pts2[:,2].max()] = fortney.giant_pts2[:,2].max()
Rf[mg10] = interpolate.griddata(fortney.giant_pts2, fortney.giant_vals2,( np.array([10.]*np.where(mg10)[0].size), tmpsmas, tmpmass))
data = data.assign(pl_radj_fortney=data['pl_radj'].values)
data['pl_radj_fortney'][noR] = ((Rf*u.R_earth).to(u.R_jupiter)).value
#######
#quick fig for docs
plt.figure()
plt.plot(R,Rf,'.')
plt.plot([0,12],[0,12])
plt.xlim([0,12])
plt.ylim([0,12])
plt.xlabel('Modified Forecaster Fit ($R_\oplus$)')
plt.ylabel('Fortney et al. (2007) Fit ($R_\oplus$)')
#######
##populate max WA based on available eccentricity data (otherwise maxWA = WA)
hase = ~np.isnan(data['pl_orbeccen'].values)
maxWA = WA[:]
maxWA[hase] = np.arctan((data['pl_orbsmax'][hase].values*(1 + data['pl_orbeccen'][hase].values)*u.AU)/(data['st_dist'][hase].values*u.pc)).to('mas')
data = data.assign(pl_maxangsep=maxWA.value)
#populate min WA based on eccentricity & inclination data (otherwise minWA = WA)
hasI = ~np.isnan(data['pl_orbincl'].values)
s = data['pl_orbsmax'].values*u.AU
s[hase] *= (1 - data['pl_orbeccen'][hase].values)
s[hasI] *= np.cos(data['pl_orbincl'][hasI].values*u.deg)
s[~hasI] = 0
minWA = np.arctan(s/(data['st_dist'].values*u.pc)).to('mas')
data = data.assign(pl_minangsep=minWA.value)
#data.to_pickle('data_062818.pkl')
##############################
##restore from disk:
data = pandas.read_pickle('data_080718.pkl')
##############################################################################################################################
# grab photometry data
#enginel = create_engine('sqlite:///' + os.path.join(os.getenv('HOME'),'Documents','AFTA-Coronagraph','ColorFun','AlbedoModels.db'))
enginel = create_engine('sqlite:///' + os.path.join(os.getenv('HOME'),'Documents','AFTA-Coronagraph','ColorFun','AlbedoModels_2015.db'))
# getting values
meta_alb = pandas.read_sql_table('header',enginel)
metallicities = meta_alb.metallicity.unique()
metallicities.sort()
betas = meta_alb.phase.unique()
betas.sort()
dists = meta_alb.distance.unique()
dists.sort()
clouds = meta_alb.cloud.unique()
clouds.sort()
cloudstr = clouds.astype(str)
for j in range(len(cloudstr)):
cloudstr[j] = 'f'+cloudstr[j]
cloudstr[cloudstr == 'f0.0'] = 'NC'
cloudstr[cloudstr == 'f1.0'] = 'f1'
cloudstr[cloudstr == 'f3.0'] = 'f3'
cloudstr[cloudstr == 'f6.0'] = 'f6'
tmp = pandas.read_sql_table('g25_t150_m0.0_d0.5_NC_phang000',enginel)
wavelns = tmp.WAVELN.values
##################
#unnecessary if pulling all phot data
photdata550 = np.zeros((meta_alb.metallicity.unique().size,meta_alb.distance.unique().size, meta_alb.phase.unique().size))
for i,fe in enumerate(meta_alb.metallicity.unique()):
basename = 'g25_t150_m'+str(fe)+'_d'
print(basename)
for j,d in enumerate(meta_alb.distance.unique()):
for k,beta in enumerate(meta_alb.phase.unique()):
name = basename+str(d)+'_NC_phang'+"%03d"%beta
try:
tmp = pandas.read_sql_table(name,enginel)
except:
photdata550[i,j,k] = np.nan
continue
ind = np.argmin(np.abs(tmp['WAVELN']-0.550))
pval = tmp['GEOMALB'][ind]
photdata550[i,j,k] = pval
photinterps = {}
for i,fe in enumerate(meta_alb.metallicity.unique()):
photinterps[fe] = {}
for j,d in enumerate(meta_alb.distance.unique()):
photinterps[fe][d] = interp1d(betas[np.isfinite(photdata550[i,j,:])],photdata550[i,j,:][np.isfinite(photdata550[i,j,:])],kind='cubic')
#################
allphotdata = np.zeros((metallicities.size, dists.size, clouds.size, betas.size, wavelns.size))
for i,fe in enumerate(metallicities):
basename = 'g25_t150_m'+str(fe)+'_d'
for j,d in enumerate(dists):
basename2 = basename+str(d)+'_'
for k,cloud in enumerate(clouds):
basename3 = basename2+cloudstr[k]+'_phang'
print(basename3)
for l,beta in enumerate(betas):
name = basename3+"%03d"%beta
try:
tmp = pandas.read_sql_table(name,enginel)
except:
print("Missing: %s"%name)
allphotdata[i,j,k,l,:] = np.nan
continue
pvals = tmp['GEOMALB'].values
if len(tmp) != len(wavelns):
missing = list(set(wavelns) - set(tmp.WAVELN.values))
inds = np.searchsorted(tmp['WAVELN'].values,missing)
pvals = np.insert(pvals,inds,np.nan)
assert np.isnan(pvals[wavelns==missing[0]])
print("Filled value: %s, %s"%(name,missing))
allphotdata[i,j,k,l,:] = pvals
#patch individual nans
for i,fe in enumerate(metallicities):
for j,d in enumerate(dists):
for k,cloud in enumerate(clouds):
for l,beta in enumerate(betas):
nans = np.isnan(allphotdata[i,j,k,l,:])
if np.any(nans) & ~np.all(nans):
tmp = interp1d(wavelns[~nans],allphotdata[i,j,k,l,~nans],kind='cubic')
allphotdata[i,j,k,l,nans] = tmp(wavelns[nans])
##np.savez('allphotdata',metallicities=metallicities,dists=dists,clouds=clouds,cloudstr=cloudstr,betas=betas,wavelns=wavelns,allphotdata=allphotdata)
#np.savez('allphotdata_2015',metallicities=metallicities,dists=dists,clouds=clouds,cloudstr=cloudstr,betas=betas,wavelns=wavelns,allphotdata=allphotdata)
#######
# visualization:
wind = np.argmin(np.abs(wavelns - 0.575))
dind = np.argmin(np.abs(dists - 1))
wind = np.argmin(np.abs(wavelns - 0.825))
dind = np.argmin(np.abs(dists - 5))
ls = ["-","--","-.",":","o-","s-","d-","h-"]
plt.figure()
for j in range(clouds.size):
plt.plot(betas,allphotdata[0,dind,j,:,wind],ls[j],label=cloudstr[j])
plt.ylabel('$p\Phi(\\beta)$')
plt.xlabel('Phase (deg)')
plt.xlim([0,180])
plt.legend()
plt.title('Phase Curves for %4.4f $\mu$m at %3.1f AU'%(wavelns[wind],dists[dind]))
########
#restore photdata from disk
#tmp = np.load('allphotdata.npz')
tmp = np.load('allphotdata_2015.npz')
allphotdata = tmp['allphotdata']
clouds = tmp['clouds']
cloudstr = tmp['cloudstr']
wavelns = tmp['wavelns']
betas = tmp['betas']
dists = tmp['dists']
metallicities = tmp['metallicities']
#########
def makeninterp(vals):
ii = interp1d(vals,vals,kind='nearest',bounds_error=False,fill_value=(vals.min(),vals.max()))
return ii
distinterp = makeninterp(dists)
betainterp = makeninterp(betas)
feinterp = makeninterp(metallicities)
cloudinterp = makeninterp(clouds)
photinterps2 = {}
quadinterps = {}
for i,fe in enumerate(metallicities):
photinterps2[fe] = {}
quadinterps[fe] = {}
for j,d in enumerate(dists):
photinterps2[fe][d] = {}
quadinterps[fe][d] = {}
for k,cloud in enumerate(clouds):
if np.any(np.isnan(allphotdata[i,j,k,:,:])):
#remove whole rows of betas
goodbetas = np.array(list(set(range(len(betas))) - set(np.unique(np.where(np.isnan(allphotdata[i,j,k,:,:]))[0]))))
photinterps2[fe][d][cloud] = RectBivariateSpline(betas[goodbetas],wavelns,allphotdata[i,j,k,goodbetas,:])
#photinterps2[fe][d][cloud] = interp2d(betas[goodbetas],wavelns,allphotdata[i,j,k,goodbetas,:].transpose(),kind='cubic')
else:
#photinterps2[fe][d][cloud] = interp2d(betas,wavelns,allphotdata[i,j,k,:,:].transpose(),kind='cubic')
photinterps2[fe][d][cloud] = RectBivariateSpline(betas,wavelns,allphotdata[i,j,k,:,:])
quadinterps[fe][d][cloud] = interp1d(wavelns,allphotdata[i,j,k,9,:].flatten())
##############################################################################################################################
## quadrature columns
#wavelengths of interest
#lambdas = np.array([575, 635, 660, 706, 760, 825])
lambdas = [575, 660, 730, 760, 825] #nm
bps = [10,18,18,18,10] #percent
bands = []
bandws = []
bandwsteps = []
for lam,bp in zip(lambdas,bps):
band = np.array([-1,1])*float(lam)/1000.*bp/200.0 + lam/1000.
bands.append(band)
[ws,wstep] = np.linspace(band[0],band[1],100,retstep=True)
bandws.append(ws)
bandwsteps.append(wstep)
bands = np.vstack(bands) #um
bws = np.diff(bands,1).flatten() #um
bandws = np.vstack(bandws)
bandwsteps = np.array(bandwsteps)
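#worked example (illustrative): the 10% band centered at 575 nm spans about 0.546-0.604 um,
#i.e. np.array([-1,1])*0.575*0.05 + 0.575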
smas = data['pl_orbsmax'].values
fes = data['st_metfe'].values
fes[np.isnan(fes)] = 0.0
Rps = data['pl_radj_forecastermod'].values
inc = data['pl_orbincl'].values
eccen = data['pl_orbeccen'].values
arg_per = data['pl_orblper'].values
tmpout = {}
for c in clouds:
for l in lambdas:
tmpout['quad_pPhi_'+"%03dC_"%(c*100)+str(l)+"NM"] = np.zeros(smas.shape)
tmpout['quad_dMag_'+"%03dC_"%(c*100)+str(l)+"NM"] = np.zeros(smas.shape)
tmpout['quad_radius_' + "%03dC_" % (c * 100) + str(l) + "NM"] = np.zeros(smas.shape)
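#assumption about the EXOSIMS helper used below: deltaMag(p, Rp, d, Phi) is taken to compute
#-2.5*log10(p*(Rp/d)**2*Phi); it is called with p=1 here because the geometric albedo is
#already folded into the interpolated pphi values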
for j, (Rp, fe,a, I, e, w) in enumerate(zip(Rps, fes,smas, inc, eccen, arg_per)):
print(j)
for c in clouds:
for l,band,bw,ws,wstep in zip(lambdas,bands,bws,bandws,bandwsteps):
#pphi = photinterps2[float(feinterp(fe))][float(distinterp(a))][c](90.0,float(l)/1000.).flatten()
#pphi = scipy.integrate.quad(quadinterps[float(feinterp(fe))][float(distinterp(a))][c],band[0],band[1])[0]/bw
#Only calc quadrature distance if eccentricity and argument of periapsis are known and the orbit is not face-on
if not np.isnan(e) and not np.isnan(w) and I != 0:
nu1 = -w
nu2 = np.pi - w
r1 = a * (1.0 - e ** 2.0) / (1.0 + e * np.cos(nu1))
r2 = a * (1.0 - e ** 2.0) / (1.0 + e * np.cos(nu2))
pphi1 = quadinterps[float(feinterp(fe))][float(distinterp(r1))][c](ws).sum() * wstep / bw
pphi2 = quadinterps[float(feinterp(fe))][float(distinterp(r2))][c](ws).sum() * wstep / bw
if np.isinf(pphi1):
print("Inf value encountered in pphi")
pphi1 = np.nan
if np.isinf(pphi2):
print("Inf value encountered in pphi")
pphi2 = np.nan
# pphi[np.isinf(pphi)] = np.nan
# pphi = pphi[0]
dMag1 = deltaMag(1, Rp * u.R_jupiter, r1 * u.AU, pphi1)
dMag2 = deltaMag(1, Rp * u.R_jupiter, r2 * u.AU, pphi2)
if np.isnan(dMag2) or dMag1 < dMag2:
dMag = dMag1
pphi = pphi1
r = r1
else:
dMag = dMag2
pphi = pphi2
r = r2
tmpout['quad_pPhi_' + "%03dC_" % (c * 100) + str(l) + "NM"][j] = pphi
tmpout['quad_radius_' + "%03dC_" % (c * 100) + str(l) + "NM"][j] = r
else:
pphi = quadinterps[float(feinterp(fe))][float(distinterp(a))][c](ws).sum()*wstep/bw
if np.isinf(pphi):
print("Inf value encountered in pphi")
pphi = np.nan
#pphi[np.isinf(pphi)] = np.nan
#pphi = pphi[0]
tmpout['quad_pPhi_'+"%03dC_"%(c*100)+str(l)+"NM"][j] = pphi
dMag = deltaMag(1, Rp*u.R_jupiter, a*u.AU, pphi)
tmpout['quad_radius_' + "%03dC_" % (c * 100) + str(l) + "NM"][j] = a
if np.isinf(dMag):
print("Inf value encountered in dmag")
dMag = np.nan
tmpout['quad_dMag_'+"%03dC_"%(c*100)+str(l)+"NM"][j] = dMag
#collect min/max/med for every wavelength
for l in lambdas:
tmp = []
for c in clouds:
tmp.append(tmpout['quad_dMag_'+"%03dC_"%(c*100)+str(l)+"NM"])
tmp = np.vstack(tmp)
tmpout["quad_dMag_min_"+str(l)+"NM"] = np.nanmin(tmp,axis=0)
tmpout["quad_dMag_max_"+str(l)+"NM"] = np.nanmax(tmp,axis=0)
tmpout["quad_dMag_med_"+str(l)+"NM"] = np.nanmedian(tmp,axis=0)
data = data.join(pandas.DataFrame(tmpout))
#data.to_pickle('data2_080718.pkl') #incorrect bw calculation
#data.to_pickle('data3_080718.pkl') #corrected bw calculation
##############################################################################################################################
#PPMod = EXOSIMS.Prototypes.PlanetPhysicalModel.PlanetPhysicalModel()
#orbit info
M = np.linspace(0,2*np.pi,100)
plannames = data['pl_name'].values
minWA = data['pl_minangsep'].values*u.mas
maxWA = data['pl_maxangsep'].values*u.mas
orbdata = None
#row = data.iloc[71]
for j in range(len(plannames)):
row = data.iloc[j]
a = row['pl_orbsmax']
e = row['pl_orbeccen']
if np.isnan(e): e = 0.0
I = row['pl_orbincl']*np.pi/180.0
if np.isnan(I): I = np.pi/2.0
w = row['pl_orblper']*np.pi/180.0
if np.isnan(w): w = 0.0
E = eccanom(M, e)
Rp = row['pl_radj_forecastermod']
dist = row['st_dist']
fe = row['st_metfe']
if np.isnan(fe): fe = 0.0
# a1 = np.cos(w)
# a2 = np.cos(I)*np.sin(w)
# a3 = np.sin(I)*np.sin(w)
# A = a*np.vstack((a1, a2, a3))
#
# b1 = -np.sqrt(1 - e**2)*np.sin(w)
# b2 = np.sqrt(1 - e**2)*np.cos(I)*np.cos(w)
# b3 = np.sqrt(1 - e**2)*np.sin(I)*np.cos(w)
# B = a*np.vstack((b1, b2, b3))
# r1 = np.cos(E) - e
# r2 = np.sin(E)
#
# r = (A*r1 + B*r2).T
# d = np.linalg.norm(r, axis=1)
# s = np.linalg.norm(r[:,0:2], axis=1)
# beta = np.arccos(r[:,2]/d)*u.rad
nu = 2*np.arctan(np.sqrt((1.0 + e)/(1.0 - e))*np.tan(E/2.0));
d = a*(1.0 - e**2.0)/(1 + e*np.cos(nu))
s = d*np.sqrt(4.0*np.cos(2*I) + 4*np.cos(2*nu + 2.0*w) - 2.0*np.cos(-2*I + 2.0*nu + 2*w) - 2*np.cos(2*I + 2*nu + 2*w) + 12.0)/4.0
beta = np.arccos(np.sin(I)*np.sin(nu+w))*u.rad
WA = np.arctan((s*u.AU)/(dist*u.pc)).to('mas').value
print(j,plannames[j],WA.min() - minWA[j].value, WA.max() - maxWA[j].value)
outdict = {'Name': [plannames[j]]*len(M),
'M': M,
'r': d,
's': s,
'WA': WA,
'beta': beta.to(u.deg).value}
inds = np.argsort(beta)
for c in clouds:
for l,band,bw,ws,wstep in zip(lambdas,bands,bws,bandws,bandwsteps):
#pphi = photinterps2[float(feinterp(fe))][float(distinterp(a))][c](beta.to(u.deg).value[inds],float(l)/1000.)[np.argsort(inds)].flatten()
pphi = (photinterps2[float(feinterp(fe))][float(distinterp(a))][c](beta.to(u.deg).value[inds],ws).sum(1)*wstep/bw)[np.argsort(inds)]
pphi[np.isinf(pphi)] = np.nan
outdict['pPhi_'+"%03dC_"%(c*100)+str(l)+"NM"] = pphi
dMag = deltaMag(1, Rp*u.R_jupiter, d*u.AU, pphi)
dMag[np.isinf(dMag)] = np.nan
outdict['dMag_'+"%03dC_"%(c*100)+str(l)+"NM"] = dMag
#pphi = np.array([ photinterps[float(feinterp(fe))][float(distinterp(di))](bi) for di,bi in zip(d,beta.to(u.deg).value) ])
#dMag = deltaMag(1, Rp*u.R_jupiter, d*u.AU, pphi)
#phi = PPMod.calc_Phi(np.arccos(r[:,2]/d)*u.rad)
#dMag = deltaMag(0.5, Rp*u.R_jupiter, d*u.AU, phi)
out = pandas.DataFrame(outdict)
if orbdata is None:
orbdata = out.copy()
else:
orbdata = orbdata.append(out)
#orbdata.to_pickle('orbdata_080718.pkl') #incorrect bw calculation
#orbdata.to_pickle('orbdata2_080718.pkl') #corrected bw calculation
#############################################################################################################################
##variable inclination orbits
plannames = data['pl_name'].values
Isglob = np.array([90,60,30])
(l,band,bw,ws,wstep) = (lambdas[0],bands[0],bws[0],bandws[0],bandwsteps[0])
c = 3.0
altorbdata = None
#row = data.iloc[71]
for j in range(len(plannames)):
row = data.iloc[j]
print(j,plannames[j])
if not np.isnan(row['pl_orbincl']):
continue
if row['pl_bmassprov'] == 'Msini':
Icrit = np.arcsin( ((row['pl_bmassj']*u.M_jupiter).to(u.M_earth)).value/((0.0800*u.M_sun).to(u.M_earth)).value )
else:
Icrit = 10*np.pi/180.0
Is = np.hstack((Isglob*np.pi/180.0,Icrit))
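#Icrit above (assumed intent): the smallest inclination for which an Msini planet's true mass
#Msini/sin(I) still stays below the ~0.08 M_sun stellar boundary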
a = row['pl_orbsmax']
e = row['pl_orbeccen']
if np.isnan(e): e = 0.0
w = row['pl_orblper']*np.pi/180.0
if np.isnan(w): w = 0.0
Rp = row['pl_radj_forecastermod']
dist = row['st_dist']
fe = row['st_metfe']
if np.isnan(fe): fe = 0.0
Tp = row['pl_orbper'] #days
Mstar = row['st_mass'] #solar masses
taup = row['pl_orbtper']
if (np.isnan(Tp) or Tp == 0.0) and np.isnan(Mstar):
print("No period or star mass for: %s")%(plannames[j])
continue
mu = const.G*(Mstar*u.solMass).decompose()
if np.isnan(Tp) or (Tp == 0.0):
Tp = (2*np.pi*np.sqrt(((a*u.AU)**3.0)/mu)).decompose().to(u.d).value
if Tp > 10*365.25:
print("Too long period for: %s")%(plannames[j])
continue
if np.isnan(mu):
mu = ( (a*u.AU)**3.0 * (2*np.pi/(Tp*u.d))**2. ).decompose()
n = 2*np.pi/Tp
#M = np.arange(0,Tp,30)*n
ttmp = np.arange(t0.jd,t0.jd+Tp,30)
M = np.mod(ttmp*n,2*np.pi)
E = eccanom(M, e)
nu = 2*np.arctan(np.sqrt((1.0 + e)/(1.0 - e))*np.tan(E/2.0));
d0 = a*(1.0 - e**2.0)/(1 + e*np.cos(nu))
# a1 = np.cos(w)
# b1 = -np.sqrt(1 - e**2)*np.sin(w)
outdict = {'Name': [plannames[j]]*len(M),
'M': M,
'r': d0,
'Icrit': [Icrit]*len(M)
}
for k,I in enumerate(Is):
# a2 = np.cos(I)*np.sin(w)
# a3 = np.sin(I)*np.sin(w)
# A = a*np.vstack((a1, a2, a3))
#
# b2 = np.sqrt(1 - e**2)*np.cos(I)*np.cos(w)
# b3 = np.sqrt(1 - e**2)*np.sin(I)*np.cos(w)
# B = a*np.vstack((b1, b2, b3))
# r1 = np.cos(E) - e
# r2 = np.sin(E)
#
# r = (A*r1 + B*r2).T
# d = np.linalg.norm(r, axis=1)
# s = np.linalg.norm(r[:,0:2], axis=1)
# beta = np.arccos(r[:,2]/d)*u.rad
nu = 2 * np.arctan(np.sqrt((1.0 + e) / (1.0 - e)) * np.tan(E / 2.0));
d = a * (1.0 - e ** 2.0) / (1 + e * np.cos(nu))
s = d * np.sqrt(4.0 * np.cos(2 * I) + 4 * np.cos(2 * nu + 2.0 * w) - 2.0 * np.cos(-2 * I + 2.0 * nu + 2 * w) - 2 * np.cos(2 * I + 2 * nu + 2 * w) + 12.0) / 4.0
beta = np.arccos(np.sin(I) * np.sin(nu + w)) * u.rad
WA = np.arctan((s*u.AU)/(dist*u.pc)).to('mas').value
if I == Icrit:
Itag = "crit"
else:
Itag = "%02d"%(Isglob[k])
outdict["s_I"+Itag] = s
outdict["WA_I"+Itag] = WA
outdict["beta_I"+Itag] = beta.to(u.deg).value
inds = np.argsort(beta)
pphi = (photinterps2[float(feinterp(fe))][float(distinterp(a))][c](beta.to(u.deg).value[inds],ws).sum(1)*wstep/bw)[np.argsort(inds)]
pphi[np.isinf(pphi)] = np.nan
outdict['pPhi_'+"%03dC_"%(c*100)+str(l)+"NM_I"+Itag] = pphi
dMag = deltaMag(1, Rp*u.R_jupiter, d*u.AU, pphi)
dMag[np.isinf(dMag)] = np.nan
outdict['dMag_'+"%03dC_"%(c*100)+str(l)+"NM_I"+Itag] = dMag
out = pandas.DataFrame(outdict)
if altorbdata is None:
altorbdata = out.copy()
else:
altorbdata = altorbdata.append(out)
#altorbdata.to_pickle('altorbdata_080718.pkl')
#############################################################################################################################
#from Mark:
#f_sed Frequency
#0.000000 0.099
#0.010000 0.001
#0.030000 0.005
#0.100000 0.010
#0.300000 0.025
#1.000000 0.280
#3.000000 0.300
#6.000000 0.280
# Generates fsed based on a random number: 0 <= num < 1
def get_fsed(num):
if num < .099:
r = 0
elif num < .1:
r = .01
elif num < .105:
r = .03
elif num < .115:
r = .1
elif num < .14:
r = .3
elif num < .42:
r = 1
elif num < .72:
r = 3
else:
r = 6
return float(r)
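#hedged sketch: many draws should roughly reproduce the tabulated frequencies above, e.g.
#np.mean([get_fsed(x) == 3 for x in np.random.rand(10000)]) should land near 0.30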
wfirstcontr = np.genfromtxt('WFIRST_pred_imaging.txt')
contr = wfirstcontr[:,1]
angsep = wfirstcontr[:,0] #l/D
angsep = (angsep * (575.0*u.nm)/(2.37*u.m)*u.rad).decompose().to(u.mas).value #mas
wfirstc = interp1d(angsep,contr,bounds_error = False, fill_value = 'extrapolate')
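#the interpolated contrast converts to a limiting delta-magnitude as -2.5*log10(contrast),
#e.g. -2.5*np.log10(wfirstc(300.)) for a 300 mas working angle (illustrative value)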
## completeness calculation
minangsep = 150
maxangsep = 450
inds = np.where((data['pl_maxangsep'].values > minangsep) & (data['pl_minangsep'].values < maxangsep))[0]
WAbins0 = np.arange(minangsep,maxangsep+1,1)
WAbins = np.hstack((0, WAbins0, np.inf))
dMagbins0 = np.arange(0,26.1,0.1)
dMagbins = np.hstack((dMagbins0,np.inf))
WAc,dMagc = np.meshgrid(WAbins0[:-1]+np.diff(WAbins0)/2.0,dMagbins0[:-1]+np.diff(dMagbins0)/2.0)
WAc = WAc.T
dMagc = dMagc.T
WAinds = np.arange(WAbins0.size-1)
dMaginds = np.arange(dMagbins0.size-1)
WAinds,dMaginds = np.meshgrid(WAinds,dMaginds)
WAinds = WAinds.T
dMaginds = dMaginds.T
dMaglimsc = wfirstc(WAc[:,0])
names = []
WAcs = []
dMagcs = []
iinds = []
jinds = []
hs = []
cs = []
goodinds = []
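#the loop below is a Monte Carlo completeness estimate: for each planet it draws orbital and
#physical parameters from their reported (or assumed) uncertainties, propagates them to
#projected separation and delta-magnitude, and accumulates the fraction of draws that fall in
#the detectable (WA, dMag) region until the running estimate converges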
for j in inds:
row = data.iloc[j]
print(j, row['pl_name'])
amu = row['pl_orbsmax']
astd = (row['pl_orbsmaxerr1'] - row['pl_orbsmaxerr2'])/2.
if np.isnan(astd): astd = 0.01*amu
gena = lambda n: np.clip(np.random.randn(n)*astd + amu,0,np.inf)
emu = row['pl_orbeccen']
if np.isnan(emu):
gene = lambda n: 0.175/np.sqrt(np.pi/2.)*np.sqrt(-2.*np.log(1 - np.random.uniform(size=n)))
else:
estd = (row['pl_orbeccenerr1'] - row['pl_orbeccenerr2'])/2.
if np.isnan(estd) or (estd == 0):
estd = 0.01*emu
gene = lambda n: np.clip(np.random.randn(n)*estd + emu,0,0.99)
Imu = row['pl_orbincl']*np.pi/180.0
if np.isnan(Imu):
if row['pl_bmassprov'] == 'Msini':
Icrit = np.arcsin( ((row['pl_bmassj']*u.M_jupiter).to(u.M_earth)).value/((0.0800*u.M_sun).to(u.M_earth)).value )
Irange = [Icrit, np.pi - Icrit]
C = 0.5*(np.cos(Irange[0])-np.cos(Irange[1]))
genI = lambda n: np.arccos(np.cos(Irange[0]) - 2.*C*np.random.uniform(size=n))
else:
genI = lambda n: np.arccos(1 - 2.*np.random.uniform(size=n))
else:
Istd = (row['pl_orbinclerr1'] - row['pl_orbinclerr2'])/2.*np.pi/180.0
if np.isnan(Istd) or (Istd == 0):
Istd = Imu*0.01
genI = lambda n: np.random.randn(n)*Istd + Imu
wbarmu = row['pl_orblper']*np.pi/180.0
if np.isnan(wbarmu):
genwbar = lambda n: np.random.uniform(size=n,low=0.0,high=2*np.pi)
else:
wbarstd = (row['pl_orblpererr1'] - row['pl_orblpererr2'])/2.*np.pi/180.0
if np.isnan(wbarstd) or (wbarstd == 0):
wbarstd = wbarmu*0.01
genwbar = lambda n: np.random.randn(n)*wbarstd + wbarmu
fe = row['st_metfe']
if np.isnan(fe): fe = 0.0
n = int(1e6)
c = 0.
h = np.zeros((len(WAbins)-3, len(dMagbins)-2))
k = 0.0
cprev = 0.0
pdiff = 1.0
while (pdiff > 0.0001) | (k <3):
#for blah in range(100):
print("%d \t %5.5e \t %5.5e"%( k,pdiff,c))
a = gena(n)
e = gene(n)
I = genI(n)
O = np.random.uniform(size=n,low=0.0,high=2*np.pi)
wbar = genwbar(n)
w = O - wbar
# cl = cloudinterp(np.random.randn(n)*2 + 3)
vget_fsed = np.vectorize(get_fsed)
cl = vget_fsed(np.random.rand(n))
if (row['pl_radreflink'] == '<a refstr="CALCULATED VALUE" href="/docs/composite_calc.html" target=_blank>Calculated Value</a>'):
if row['pl_bmassprov'] == 'Msini':
Mp = ((row['pl_bmassj']*u.M_jupiter).to(u.M_earth)).value
Mp = Mp/np.sin(I)
else:
Mstd = (((row['pl_bmassjerr1'] - row['pl_bmassjerr2'])*u.M_jupiter).to(u.M_earth)).value
if np.isnan(Mstd):
Mstd = ((row['pl_bmassj']*u.M_jupiter).to(u.M_earth)).value * 0.1
Mp = np.random.randn(n)*Mstd + ((row['pl_bmassj']*u.M_jupiter).to(u.M_earth)).value
R = (RfromM(Mp)*u.R_earth).to(u.R_jupiter).value
R[R > 1.0] = 1.0
else:
Rmu = row['pl_radj']
Rstd = (row['pl_radjerr1'] - row['pl_radjerr2'])/2.
if np.isnan(Rstd): Rstd = Rmu*0.1
R = np.random.randn(n)*Rstd + Rmu
M0 = np.random.uniform(size=n,low=0.0,high=2*np.pi)
E = eccanom(M0, e)
nu = 2*np.arctan(np.sqrt((1+e)/(1-e))*np.tan(E/2))
# a1 = np.cos(O)*np.cos(w) - np.sin(O)*np.cos(I)*np.sin(w)
# a2 = np.sin(O)*np.cos(w) + np.cos(O)*np.cos(I)*np.sin(w)
# a3 = np.sin(I)*np.sin(w)
# A = a*np.vstack((a1, a2, a3))
# b1 = -np.sqrt(1 - e**2)*(np.cos(O)*np.sin(w) + np.sin(O)*np.cos(I)*np.cos(w))
# b2 = np.sqrt(1 - e**2)*(-np.sin(O)*np.sin(w) + np.cos(O)*np.cos(I)*np.cos(w))
# b3 = np.sqrt(1 - e**2)*np.sin(I)*np.cos(w)
# B = a*np.vstack((b1, b2, b3))
# r1 = np.cos(E) - e
# r2 = np.sin(E)
#
# rvec = (A*r1 + B*r2).T
# rnorm = np.linalg.norm(rvec, axis=1)
# s = np.linalg.norm(rvec[:,0:2], axis=1)
# beta = np.arccos(rvec[:,2]/rnorm)*u.rad
d = a * (1.0 - e ** 2.0) / (1 + e * np.cos(nu))
s = d * np.sqrt(4.0 * np.cos(2 * I) + 4 * np.cos(2 * nu + 2.0 * w) - 2.0 * np.cos(-2 * I + 2.0 * nu + 2 * w) - 2 * np.cos(2 * I + 2 * nu + 2 * w) + 12.0) / 4.0
beta = np.arccos(np.sin(I) * np.sin(nu + w)) * u.rad
rnorm = d
#phi = PPMod.calc_Phi(np.arccos(rvec[:,2]/rnorm)*u.rad) # planet phase
#dMag = deltaMag(0.5, R*u.R_jupiter, rnorm*u.AU, phi) # delta magnitude
#pphi = photinterps[float(feinterp(fe))][float(distinterp(np.mean(rnorm)))](beta.to(u.deg).value)
pphi = np.zeros(n)
for clevel in np.unique(cl):
tmpinds = cl == clevel
betatmp = beta[tmpinds]
binds = np.argsort(betatmp)
pphi[tmpinds] = photinterps2[float(feinterp(fe))][float(distinterp(np.mean(rnorm)))][clevel](betatmp.to(u.deg).value[binds],575./1000.)[np.argsort(binds)].flatten()
pphi[pphi <= 0.0] = 1e-16
#binds = np.argsort(beta)
#pphi = photinterps2[float(feinterp(fe))][float(distinterp(np.mean(rnorm)))][0.0](beta.to(u.deg).value[binds],575./1000.)[np.argsort(binds)].flatten()
dMag = deltaMag(1, R*u.R_jupiter, rnorm*u.AU, pphi)
WA = np.arctan((s*u.AU)/(row['st_dist']*u.pc)).to('mas').value # working angle
h += np.histogram2d(WA,dMag,bins=(WAbins,dMagbins))[0][1:-1,0:-1]
k += 1.0
dMaglimtmp = -2.5*np.log10(wfirstc(WA))
currc = float(len(np.where((WA >= minangsep) & (WA <= maxangsep) & (dMag <= dMaglimtmp))[0]))/n
#currc = float(len(np.where((WA >= minangsep) & (WA <= maxangsep) & (dMag <= 22.5))[0]))/n
cprev = c
if k == 1.0:
c = currc
else:
c = ((k-1)*c + currc)/k
if c == 0:
pdiff = 1.0
else:
pdiff = np.abs(c - cprev)/c
if (c == 0.0) & (k > 2):
break
if (c < 1e-5) & (k > 25):
break
if c != 0.0:
h = h/float(n*k)
names.append(np.array([row['pl_name']]*h.size))
WAcs.append(WAc.flatten())
dMagcs.append(dMagc.flatten())
hs.append(h.flatten())
iinds.append(WAinds.flatten())
jinds.append(dMaginds.flatten())
cs.append(c)
goodinds.append(j)
print("\n\n\n\n")
cs = np.array(cs)
goodinds = np.array(goodinds)
out2 = pandas.DataFrame({'Name': np.hstack(names),
'alpha': np.hstack(WAcs),
'dMag': np.hstack(dMagcs),
'H': np.hstack(hs),
'iind': np.hstack(iinds),
'jind': np.hstack(jinds)
})
out2 = out2[out2['H'].values != 0.]
out2['H'] = np.log10(out2['H'].values)
minCWA = []
maxCWA = []
minCdMag = []
maxCdMag = []
for j in range(len(goodinds)):
minCWA.append(np.floor(np.min(WAcs[j][hs[j] != 0])))
maxCWA.append(np.ceil(np.max(WAcs[j][hs[j] != 0])))
minCdMag.append(np.floor(np.min(dMagcs[j][hs[j] != 0])))
maxCdMag.append(np.ceil(np.max(dMagcs[j][hs[j] != 0])))
#np.savez('completeness_080718',cs=cs,goodinds=goodinds,minCWA=minCWA,maxCWA=maxCWA,minCdMag=minCdMag,maxCdMag=maxCdMag)
#out2.to_pickle('completeness_080718.pkl')
#####
#restore
out2 = pandas.read_pickle('completeness2_080718.pkl')
tmp = np.load('completeness2_080718.npz')
goodinds = tmp['goodinds']
minCdMag = tmp['minCdMag']
maxCWA = tmp['maxCWA']
minCWA = tmp['minCWA']
maxCdMag = tmp['maxCdMag']
cs = tmp['cs']
###################################################################
#build alias table
from astroquery.simbad import Simbad
starnames = data['pl_hostname'].unique()
s = Simbad()
s.add_votable_fields('ids')
baseurl = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI"
ids = []
aliases = []
noipacalias = []
nosimbadalias = []
badstars = []
priname = []
for j,star in enumerate(starnames):
print(j,star)
#get aliases from IPAC
r = requests.get(baseurl,{'table':'aliastable','objname':star})
if "ERROR" not in r.content:
tmp = r.content.strip().split("\n")
else:
noipacalias.append(star)
tmp = [star]
#get aliases from SIMBAD
r = s.query_object(star)
if r:
tmp += r['IDS'][0].split('|')
else:
if (len(noipacalias) == 0) or (noipacalias[-1] != star):
for t in tmp:
if (t not in['aliasdis',star]):
r = s.query_object(t)
if r:
tmp += r['IDS'][0].split('|')
break
if not r:
nosimbadalias.append(star)
else:
nosimbadalias.append(star)
#track stars with no records
if (len(noipacalias) > 0) and (len(nosimbadalias) > 0) and (noipacalias[-1] == star) and (nosimbadalias[-1] == star):
badstars.append(star)
if star not in tmp: tmp.append(star)
if 'aliasdis' in tmp: tmp.remove('aliasdis')
tmp = list(np.unique(tmp))
ids.append([j]*len(tmp))
aliases.append(tmp)
priname.append(list((np.array(tmp) == star).astype(int)))
#toggleoff = ['notesel','messel','bibsel','fluxsel','sizesel','mtsel','spsel','rvsel','pmsel','cooN','otypesel']
#url = """http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s"""%starnames[j]
#for t in toggleoff:
# url += "&obj.%s=off"%t
out3 = pandas.DataFrame({'SID': np.hstack(ids),
'Alias': np.hstack(aliases),
'NEAName':np.hstack(priname)
})
#out3.to_pickle('aliases_080718.pkl')
###################################################################
#------write to db------------
namemxchar = np.array([len(n) for n in plannames]).max()
#testdb
engine = create_engine('mysql+pymysql://ds264@127.0.0.1/dsavrans_plandb',echo=False)
#proddb#################################################################################################
username = 'dsavrans_admin'
passwd = keyring.get_password('plandb_sql_login', username)
if passwd is None:
passwd = getpass.getpass("Password for mysql user %s:\n"%username)
keyring.set_password('plandb_sql_login', username, passwd)
engine = create_engine('mysql+pymysql://'+username+':'+passwd+'@sioslab.com/dsavrans_plandb',echo=False)
#proddb#################################################################################################
##cleanup as necessary
result = engine.execute("DROP TABLE IF EXISTS PlanetOrbits")
result = engine.execute("DROP TABLE IF EXISTS Completeness")
result = engine.execute("DROP TABLE IF EXISTS AltPlanetOrbits")
result = engine.execute("UPDATE KnownPlanets SET completeness=NULL")
result = engine.execute("UPDATE KnownPlanets SET compMinWA=NULL")
result = engine.execute("UPDATE KnownPlanets SET compMaxWA=NULL")
result = engine.execute("UPDATE KnownPlanets SET compMindMag=NULL")
result = engine.execute("UPDATE KnownPlanets SET compMaxdMag=NULL")
##write KnownPlanets
data.to_sql('KnownPlanets',engine,chunksize=100,if_exists='replace',
dtype={'pl_name':sqlalchemy.types.String(namemxchar),
'pl_hostname':sqlalchemy.types.String(namemxchar-2),
'pl_letter':sqlalchemy.types.CHAR(1)})
result = engine.execute("ALTER TABLE KnownPlanets ENGINE=InnoDB")
result = engine.execute("ALTER TABLE KnownPlanets ADD INDEX (pl_name)")
result = engine.execute("ALTER TABLE KnownPlanets ADD INDEX (pl_hostname)")
result = engine.execute("ALTER TABLE KnownPlanets ADD completeness double COMMENT 'completeness in 0.1 to 0.5 as bin'")
result = engine.execute("UPDATE KnownPlanets SET completeness=NULL where completeness is not NULL")
result = engine.execute("ALTER TABLE KnownPlanets ADD compMinWA double COMMENT 'min non-zero completeness WA'")
result = engine.execute("ALTER TABLE KnownPlanets ADD compMaxWA double COMMENT 'max non-zero completeness WA'")
result = engine.execute("ALTER TABLE KnownPlanets ADD compMindMag double COMMENT 'min non-zero completeness dMag'")
result = engine.execute("ALTER TABLE KnownPlanets ADD compMaxdMag double COMMENT 'max non-zero completeness dMag'")
for ind,c in zip(goodinds,cs):
result = engine.execute("UPDATE KnownPlanets SET completeness=%f where pl_name = '%s'"%(c,plannames[ind]))
for ind,minw,maxw,mind,maxd in zip(goodinds,minCWA,maxCWA,minCdMag,maxCdMag):
result = engine.execute("UPDATE KnownPlanets SET compMinWA=%f,compMaxWA=%f,compMindMag=%f,compMaxdMag=%f where pl_name = '%s'"%(minw,maxw,mind,maxd,plannames[ind]))
#add comments
coldefs = pandas.ExcelFile('coldefs.xlsx')
coldefs = coldefs.parse('Sheet1')
cols = coldefs['Column'][coldefs['Definition'].notnull()].values
cdefs = coldefs['Definition'][coldefs['Definition'].notnull()].values
cnames = coldefs['Name'][coldefs['Definition'].notnull()].values
result = engine.execute("show create table KnownPlanets")
res = result.fetchall()
res = res[0]['Create Table']
res = res.split("\n")
p = re.compile('`(\S+)`[\s\S]+')
keys = []
defs = []
for r in res:
r = r.strip().strip(',')
if "COMMENT" in r: continue
m = p.match(r)
if m:
keys.append(m.groups()[0])
defs.append(r)
for key,d in zip(keys,defs):
if not key in cols: continue
comm = """ALTER TABLE `KnownPlanets` CHANGE `%s` %s COMMENT "%s %s";"""%(key,d,cnames[cols == key][0].strip('"'),cdefs[cols == key][0])
print comm
r = engine.execute(comm)
#---------------------------------------------
#write planetorbits table
orbdata.to_sql('PlanetOrbits',engine,chunksize=100,if_exists='replace',dtype={'Name':sqlalchemy.types.String(namemxchar)})
result = engine.execute("ALTER TABLE PlanetOrbits ENGINE=InnoDB")
result = engine.execute("ALTER TABLE PlanetOrbits ADD INDEX (Name)")
result = engine.execute("ALTER TABLE PlanetOrbits ADD FOREIGN KEY (Name) REFERENCES KnownPlanets(pl_name) ON DELETE NO ACTION ON UPDATE NO ACTION");
#---------------------------------------------
#write completeness table
out2.to_sql('Completeness',engine,chunksize=100,if_exists='replace',dtype={'Name':sqlalchemy.types.String(namemxchar)})
result = engine.execute("ALTER TABLE Completeness ENGINE=InnoDB")
result = engine.execute("ALTER TABLE Completeness ADD INDEX (Name)")
result = engine.execute("ALTER TABLE Completeness ADD FOREIGN KEY (Name) REFERENCES KnownPlanets(pl_name) ON DELETE NO ACTION ON UPDATE NO ACTION");
#---------------------------------------------
#write altplanetorbits table
altorbdata.to_sql('AltPlanetOrbits',engine,chunksize=100,if_exists='replace',dtype={'Name':sqlalchemy.types.String(namemxchar)})
result = engine.execute("ALTER TABLE AltPlanetOrbits ENGINE=InnoDB")
result = engine.execute("ALTER TABLE AltPlanetOrbits ADD INDEX (Name)")
result = engine.execute("ALTER TABLE AltPlanetOrbits ADD FOREIGN KEY (Name) REFERENCES KnownPlanets(pl_name) ON DELETE NO ACTION ON UPDATE NO ACTION");
#---------------------------------------------------
#write alias table
aliasmxchar = np.array([len(n) for n in out3['Alias'].values]).max()
out3.to_sql('Aliases',engine,chunksize=100,if_exists='replace',dtype={'Alias':sqlalchemy.types.String(aliasmxchar)})
result = engine.execute("ALTER TABLE Aliases ENGINE=InnoDB")
result = engine.execute("ALTER TABLE Aliases ADD INDEX (Alias)")
result = engine.execute("ALTER TABLE Aliases ADD INDEX (SID)")
| mit |
rahul-c1/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
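# Optional check (attributes provided by scikit-learn's BayesianRidge): the learned noise and
# weight precisions should land near the generating values alpha_=50 and lambda_=4, e.g.
# print(clf.alpha_, clf.lambda_)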
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
mdeff/ntds_2017 | projects/reports/wikipedia_hyperlink/utils.py | 1 | 9130 | import wikipedia
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import numpy as np
import plotly.graph_objs as go
from sklearn import linear_model
def explore_page(page_title, network, to_explore, inner=False, all_nodes=None):
"""
This function explores the Wikipedia page who's title is `page_title`.
:param page_title: title of the Wikipedia page we want to explore
:param network: dictionary containing the nodes of the graph. If the current page is a real page, we
add it to this dictionary.
:param to_explore: Queue of the nodes to explore. We add all the links contained in the current page to this queue.
:param inner: Boolean. If we're looking for inner links in the network (last step of the scraping), then there is
no need to explore disambiguation pages or to append the links of the current page to the `to_explore` queue.
:param all_nodes: This is the set of all the nodes in the network. It is not None only `inner` is True. This is
useful in order to find the inner links (we take the intersection of the neighbors with the nodes of the network.
"""
if page_title not in network.keys():
# then this page has not been explored yet
try:
page = wikipedia.page(page_title) # get the page
title = page.original_title
if title not in network.keys(): # check if the original title has already been explored
if not inner:
network[title] = {'links': page.links, 'categories': page.categories, 'url': page.url}
for node in page.links:
to_explore.append(node)
else:
links = list(set(page.links).intersection(set(all_nodes)))
network[title] = {'links': links, 'categories': page.categories, 'url': page.url}
except wikipedia.DisambiguationError as e:
if inner:
# We are only looking for inner links, no need to explore the disambiguation page.
return
print('Disambiguation of : {}'.format(page_title))
links = e.options # those are the pages listed in the disambiguation page
for node in links:
to_explore.append(node)
except wikipedia.PageError:
# page does not exist nothing we can do
return
except wikipedia.RedirectError:
return
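# Hypothetical usage sketch (the seed page and size cap are illustrative, not from the project):
# from collections import deque
# network, queue = {}, deque()
# explore_page("Graph theory", network, queue)
# while queue and len(network) < 100:
#     explore_page(queue.popleft(), network, queue)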
def save_obj(obj, name):
with open('data/' + name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
with open('data/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
def get_bag_of_communities(network, partition):
"""
:param network: dictionary containing for each key (each node/page) a dictionary containing the page categories.
:param partition: list of the community assignment
:return: list of dictionaries, one dictionary per community. Each dictionary contains the categories of all the
nodes of a given community as keys and the number of pages in the community that have this category as values.
"""
k = len(set(partition)) # number of communities
bags_of_categories = [{} for _ in range(k)]
for i, title in enumerate(network.keys()):
cats = network[title]['categories']
if type(partition) == list:
label = partition[i]
else:
label = partition[title]
for c in cats:
if c in bags_of_categories[label].keys():
bags_of_categories[label][c] += 1
else:
bags_of_categories[label][c] = 1
return bags_of_categories
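# Hedged usage sketch (the community labels are assumed to come from e.g. python-louvain):
# bags = get_bag_of_communities(network, partition)
# top10 = sorted(bags[0].items(), key=lambda kv: kv[1], reverse=True)[:10]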
def plot_degree_distribution(graph, figsize=(15, 6), title='Degree distribution'):
"""
Plot the degree distribution of a given NetworkX graph.
"""
fig, ax = plt.subplots(figsize=figsize)
d = list(dict(graph.degree()).values())
sns.distplot(list(d), bins=16, ax=ax)
ax.set_ylabel('Number of vertices')
ax.set_xlabel('Degree')
ax.set_title(title)
plt.show()
def get_distribution(a):
"""
Returns the degree distribution of a given NetworkX graph. The returned
value is an array whose k'th entry is the probability of a node to have
the degree k.
"""
if type(a) == nx.classes.graph.Graph:
probabilities = np.zeros(len(a) + 1)
for k in nx.adjacency_matrix(a).sum(axis=1):
probabilities[k] += 1
probabilities = probabilities / np.sum(probabilities)
return probabilities
def print_distribution(graph, a=None, b=None, c=None, d=None):
"""
Plots a graph's degree distribution in natural, semi-log and log-log scales.
"""
probability_distribution = get_distribution(graph)
if a is None:
a = len(probability_distribution)
if b is None:
b = len(probability_distribution)
if c is None:
c = len(probability_distribution)
if d is None:
d = len(probability_distribution)
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(15, 9))
ax[0, 0].set_title('Degree distribution')
ax[0, 0].plot(probability_distribution[:a])
ax[0, 1].set_title('Semi log x degree distribution')
ax[0, 1].semilogx(probability_distribution[:b])
ax[1, 0].set_title('Semi log y degree distribution')
ax[1, 0].semilogy(probability_distribution[:c], 's')
ax[1, 1].set_title('Log-log degree distribution')
ax[1, 1].loglog(probability_distribution[:d], 's')
plt.show()
def print_denoised_degree_distribution(graph, a=None, b=None, c=None, d=None):
probability_distribution = get_distribution(graph)
if a is None:
a = len(probability_distribution)
if b is None:
b = len(probability_distribution)
if c is None:
c = len(probability_distribution)
if d is None:
d = len(probability_distribution)
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(15, 9))
ax[0, 0].set_title('Degree distribution')
ax[0, 0].plot(probability_distribution[:a])
ax[0, 1].set_title('De-noised degree distribution')
ax[0, 1].plot(probability_distribution[:b])
ax[1, 0].set_title('Log-log degree distribution')
ax[1, 0].loglog(probability_distribution[:c], 's')
ax[1, 1].set_title('Log-log de-noised degree distribution')
ax[1, 1].loglog(probability_distribution[:d], 's')
plt.show()
def linear_regression_coefficient(graph, title, limit=None):
probability_distribution = get_distribution(graph)
x = np.where(probability_distribution != 0)[0]
y = probability_distribution[x]
logx = np.log(x)
logy = np.log(y)
if limit is None:
limit = len(logx)
logx = logx[:limit]
logy = logy[:limit]
logx = logx.reshape(-1, 1)
logy = logy.reshape(-1, 1)
regression = linear_model.LinearRegression()
regression.fit(logx, logy)
print('The best linear approximation is y = {0}x + {1}.'.format(regression.coef_, regression.intercept_))
print('R2 value for the regression : {}'.format(regression.score(logx, logy)))
fig, ax = plt.subplots(ncols=2, figsize=(15, 5))
ax[0].scatter(logx, logy, color='C0', label='Distribution')
ax[0].plot(logx, regression.coef_*logx + regression.intercept_, color='C1', label='Linear approximation')
ax[0].set_title(title)
ax[0].legend(loc='upper right')
sns.regplot(logx, logy[:, 0], ax=ax[1])
def build_communities(partition_type, positions, G, community2color):
edge_trace = go.Scattergl(
x=[],
y=[],
line=dict(width=0.5, color='#888'),
showlegend=False,
hoverinfo='none',
mode='lines')
for edge in G.edges():
x0, y0 = G.node[edge[0]][positions]
x1, y1 = G.node[edge[1]][positions]
edge_trace['x'] += [x0, x1]
edge_trace['y'] += [y0, y1]
node_trace = go.Scattergl(
x=[],
y=[],
text=[],
mode='markers',
marker=dict(
color=[],
size=10,
opacity=0.5)
)
for node in G.nodes():
x, y = G.node[node][positions]
node_trace['x'].append(x)
node_trace['y'].append(y)
for node in G.nodes:
node_trace['marker']['color'].append(community2color[int(G.nodes[node][partition_type])])
node_trace['text'].append(node)
data = [edge_trace, node_trace]
return data
def set_layout(title):
layout = go.Layout(
showlegend=False,
hovermode='closest',
margin=dict(b=20, l=5, r=5, t=40),
title='<br>Communities by {}'.format(title),
xaxis=dict(
autorange=True,
showgrid=False,
zeroline=False,
showline=False,
autotick=True,
ticks='',
showticklabels=False
),
yaxis=dict(
autorange=True,
showgrid=False,
zeroline=False,
showline=False,
autotick=True,
ticks='',
showticklabels=False
))
return layout
| mit |
rseubert/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/style/core.py | 11 | 4957 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
"""
Core functions and attributes for the matplotlib style library:
``use``
Select style sheet to override the current matplotlib settings.
``context``
Context manager to use a style sheet temporarily.
``available``
List available style sheets.
``library``
A dictionary of style names and matplotlib settings.
"""
import os
import re
import contextlib
import matplotlib as mpl
from matplotlib import cbook
from matplotlib import rc_params_from_file
__all__ = ['use', 'context', 'available', 'library', 'reload_library']
BASE_LIBRARY_PATH = os.path.join(mpl.get_data_path(), 'stylelib')
# Users may want multiple library paths, so store a list of paths.
USER_LIBRARY_PATHS = [os.path.join(mpl._get_configdir(), 'stylelib')]
STYLE_EXTENSION = 'mplstyle'
STYLE_FILE_PATTERN = re.compile('([\S]+).%s$' % STYLE_EXTENSION)
def is_style_file(filename):
"""Return True if the filename looks like a style file."""
return STYLE_FILE_PATTERN.match(filename) is not None
def use(name):
"""Use matplotlib style settings from a known style sheet or from a file.
Parameters
----------
name : str or list of str
Name of style or path/URL to a style file. For a list of available
style names, see `style.available`. If given a list, each style is
applied from first to last in the list.
"""
if cbook.is_string_like(name):
name = [name]
for style in name:
if style in library:
mpl.rcParams.update(library[style])
else:
try:
rc = rc_params_from_file(style, use_default_template=False)
mpl.rcParams.update(rc)
except:
msg = ("'%s' not found in the style library and input is "
"not a valid URL or path. See `style.available` for "
"list of available styles.")
raise ValueError(msg % style)
@contextlib.contextmanager
def context(name, after_reset=False):
"""Context manager for using style settings temporarily.
Parameters
----------
name : str or list of str
Name of style or path/URL to a style file. For a list of available
style names, see `style.available`. If given a list, each style is
applied from first to last in the list.
after_reset : bool
If True, apply style after resetting settings to their defaults;
otherwise, apply style on top of the current settings.
"""
initial_settings = mpl.rcParams.copy()
if after_reset:
mpl.rcdefaults()
use(name)
yield
mpl.rcParams.update(initial_settings)
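# Example usage (assumes the 'ggplot' sheet is available in the style library):
# with context('ggplot'):
#     plt.plot([1, 2, 3])  # plotted with the temporary style; settings are restored afterwards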
def load_base_library():
"""Load style library defined in this package."""
library = dict()
library.update(read_style_directory(BASE_LIBRARY_PATH))
return library
def iter_user_libraries():
for stylelib_path in USER_LIBRARY_PATHS:
stylelib_path = os.path.expanduser(stylelib_path)
if os.path.exists(stylelib_path) and os.path.isdir(stylelib_path):
yield stylelib_path
def update_user_library(library):
"""Update style library with user-defined rc files"""
for stylelib_path in iter_user_libraries():
styles = read_style_directory(stylelib_path)
update_nested_dict(library, styles)
return library
def iter_style_files(style_dir):
"""Yield file path and name of styles in the given directory."""
for path in os.listdir(style_dir):
filename = os.path.basename(path)
if is_style_file(filename):
match = STYLE_FILE_PATTERN.match(filename)
path = os.path.abspath(os.path.join(style_dir, path))
yield path, match.groups()[0]
def read_style_directory(style_dir):
"""Return dictionary of styles defined in `style_dir`."""
styles = dict()
for path, name in iter_style_files(style_dir):
styles[name] = rc_params_from_file(path, use_default_template=False)
return styles
def update_nested_dict(main_dict, new_dict):
"""Update nested dict (only level of nesting) with new values.
Unlike dict.update, this assumes that the values of the parent dict are
dicts (or dict-like), so you shouldn't replace the nested dict if it
already exists. Instead you should update the sub-dict.
"""
# update named styles specified by user
for name, rc_dict in six.iteritems(new_dict):
if name in main_dict:
main_dict[name].update(rc_dict)
else:
main_dict[name] = rc_dict
return main_dict
# Load style library
# ==================
_base_library = load_base_library()
library = None
available = []
def reload_library():
"""Reload style library."""
global library, available
library = update_user_library(_base_library)
available[:] = library.keys()
reload_library()
| mit |
jaytlennon/Dimensions | Aim3/papers/DD/PythonScripts/Env_Geo_Bootstrap.py | 4 | 6308 | from __future__ import division
import matplotlib.pyplot as plt
import geopy
from geopy.distance import vincenty
import skbio
import skbio.diversity
import skbio.diversity.beta
from skbio.diversity import beta_diversity
import pandas as pd
import linecache
import numpy as np
import scipy as sc
import scipy.spatial.distance as spd
import os
import sys
#import statsmodels.stats.api as sms
#import statsmodels.api as sm
import statsmodels.formula.api as smf
#from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
mydir = os.path.expanduser("~/GitHub/Dimensions/Aim3/papers/DD")
mydir2 = os.path.expanduser("~/")
EnvDat = pd.read_csv("~/GitHub/Dimensions/Aim3/DATA/EnvData/20130801_PondDataMod.csv", sep = ",", header = False)
Active = pd.read_csv("~/GitHub/Dimensions/Aim3/DATA/ForPython/ActiveComm.csv", sep = ",", header = False)
All = pd.read_csv("~/GitHub/Dimensions/Aim3/DATA/ForPython/AllComm.csv", sep = ",", header = False)
#ColNames = list(Active.columns.values)
#print ColNames
dat2 = EnvDat[EnvDat['chla'] < 2000.0]
dat2 = dat2[dat2['pH'] > 1.0]
dat2 = dat2[dat2['Salinity'] > 0.0]
dat2 = dat2[dat2['TDS'] < 5.0]
results = pd.DataFrame()
trows = len(dat2.axes[0])
tcols = len(dat2.axes[1])
allrows = len(All.axes[0])
actrows = len(Active.axes[0])
if trows != allrows or trows != actrows: sys.exit()
ColNames = list(dat2.columns.values)
for i, column in enumerate(dat2):
if i >= 6:
#print ColNames[i], i
#print dat2.ix[:,i]
dat2.ix[:,i] = (dat2.ix[:,i] - np.mean(dat2.ix[:,i]))/np.std(dat2.ix[:,i])
#sys.exit()
SampleSize = [4, 6, 8, 10, 15, 20, 25, 30, 35, 40, 45]
#SampleSize = [4, 8, 12, 20, 40]
#accumulators for the mean correlation coefficient and p-value at each sample size
EucRS, EucPVALS = [], []
SquRS, SquPVALS = [], []
ManRS, ManPVALS = [], []
HamRS, HamPVALS = [], []
CorRS, CorPVALS = [], []
CosRS, CosPVALS = [], []
for size in SampleSize:
print 'sample size:', size
euc_rs, euc_pvals = [], []
squ_rs, squ_pvals = [], []
man_rs, man_pvals = [], []
ham_rs, ham_pvals = [], []
cor_rs, cor_pvals = [], []
cos_rs, cos_pvals = [], []
ct = 0
while ct < 100:
#randomly subsample `size` ponds for this bootstrap draw (assumed intent of the selection step)
inds = np.random.choice(range(trows), size, replace=False)
env = dat2.iloc[inds]
rows = size
GDists = []
EuDists, SquDists, ManDists = [], [], []
HamDists, CorDists, CosDists = [], [], []
for i in range(rows):
row1 = env.iloc[[i]]
lat1 = float(row1['lat']) # latitudes (north and south)
long1 = float(row1['long']) # longitudes (east and west)
env1 = row1.ix[:, 7:19]
for j in range(rows):
if j <= i: continue
row2 = env.iloc[[j]]
lat2 = float(row2['lat']) # latitudes (north and south)
long2 = float(row2['long']) # longitudes (east and west)
env2 = row2.ix[:, 7:19]
# geographic distance
geo_dist = vincenty((lat1, long1), (lat2, long2)).km
GDists.append(geo_dist)
# environmental distances
eu_dist = spd.euclidean(env1, env2) # Euclidean distance
EuDists.append(eu_dist)
man_dist = spd.cityblock(env1, env2) # Manhattan distance
ManDists.append(man_dist)
ham_dist = spd.hamming(env1, env2) # Square Euclidean distance
HamDists.append(ham_dist)
squ_dist = spd.sqeuclidean(env1, env2) # Square Euclidean distance
SquDists.append(squ_dist)
cos_dist = spd.cosine(env1, env2) # cosine distance
CosDists.append(cos_dist)
cor_dist = spd.correlation(env1, env2) # correlation distance
CorDists.append(cor_dist)
# get correlation coefficient and p-value
r, p = sc.stats.pearsonr(GDists, EuDists)
euc_rs.append(r)
euc_pvals.append(p)
r, p = sc.stats.pearsonr(GDists, SquDists)
squ_rs.append(r)
squ_pvals.append(p)
r, p = sc.stats.pearsonr(GDists, ManDists)
man_rs.append(r)
man_pvals.append(p)
r, p = sc.stats.pearsonr(GDists, CorDists)
cor_rs.append(r)
cor_pvals.append(p)
r, p = sc.stats.pearsonr(GDists, HamDists)
ham_rs.append(r)
ham_pvals.append(p)
r, p = sc.stats.pearsonr(GDists, CosDists)
cos_rs.append(r)
cos_pvals.append(p)
ct += 1
# get average cc and p-val
ManRS.append(np.mean(man_rs))
ManPVALS.append(np.mean(man_pvals))
HamRS.append(np.mean(ham_rs))
HamPVALS.append(np.mean(ham_pvals))
CorRS.append(np.mean(cor_rs))
CorPVALS.append(np.mean(cor_pvals))
EucRS.append(np.mean(euc_rs))
EucPVALS.append(np.mean(euc_pvals))
SquRS.append(np.mean(squ_rs))
SquPVALS.append(np.mean(squ_pvals))
CosRS.append(np.mean(cos_rs))
CosPVALS.append(np.mean(cos_pvals))
print "generating figure"
fig = plt.figure()
fig.add_subplot(2, 2, 1)
SampleSize = np.array(SampleSize)
SampleSize = np.log10((SampleSize*(SampleSize - 1))/2)
plt.plot(SampleSize, SquPVALS, color = '0.2', alpha= 0.6 , linewidth = 2, label='Square Euc.')
plt.plot(SampleSize, ManPVALS, color = 'SteelBlue', alpha= 0.6 , linewidth=2, label='Manhattan')
plt.plot(SampleSize, EucPVALS, color = 'm', alpha= 0.6 , linewidth=2, label='Euclidean')
plt.plot(SampleSize, HamPVALS, color = '0.7', alpha= 0.6 , linewidth=2, label='Hamming')
plt.plot(SampleSize, CorPVALS, color = 'Limegreen', alpha= 0.6 , linewidth=2, label='Correlation')
plt.plot(SampleSize, CosPVALS, color = 'red', alpha= 0.6 , linewidth=2, label='Cosine')
plt.plot([min(SampleSize), max(SampleSize)], [0.05, 0.05], c='0.2', ls='--')
plt.legend(bbox_to_anchor=(-0.03, 1.07, 2.46, .3), loc=10, ncol=3, mode="expand",prop={'size':12})
plt.ylim(0, 0.2)
plt.ylabel('$p$'+'-value', fontsize=14)
plt.xlabel('log10(pairwise comparisons)', fontsize=14)
fig.add_subplot(2, 2, 2)
plt.plot(SampleSize, SquRS, color = '0.2', alpha= 0.6 , linewidth = 2)
plt.plot(SampleSize, ManRS, color = 'SteelBlue', alpha= 0.6 , linewidth=2)
plt.plot(SampleSize, EucRS, color = 'm', alpha= 0.6 , linewidth=2)
plt.plot(SampleSize, HamRS, color = '0.7', alpha= 0.6 , linewidth=2)
plt.plot(SampleSize, CorRS, color = 'Limegreen', alpha= 0.6 , linewidth=2)
plt.plot(SampleSize, CosRS, color = 'red', alpha= 0.6 , linewidth=2)
plt.ylabel('Correlation coefficient', fontsize=12)
plt.xlabel('log10(pairwise comparisons)', fontsize=12)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir+'/DD/figs/SampleSizeDependency.png', dpi=300, bbox_inches = "tight")
#plt.show()
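# -- Illustrative sketch (hypothetical names, not part of the analysis above):
# the six distance-metric blocks in the sampling loop all follow one pattern
# (pairwise environmental distance, then Pearson correlation against the
# geographic distances), so they could be driven by a dict of
# scipy.spatial.distance callables. `spd` and `sc` are assumed to be the
# scipy.spatial.distance and scipy imports already used by this script.
METRICS = {'Euclidean': spd.euclidean, 'Manhattan': spd.cityblock,
           'Hamming': spd.hamming, 'Square Euc.': spd.sqeuclidean,
           'Cosine': spd.cosine, 'Correlation': spd.correlation}

def distance_correlations(env_rows, gdists, pairs):
    """Return {metric name: (r, p)} for one subsample.
    `pairs` holds the (i, j) row pairs used to build `gdists`."""
    out = {}
    for name, fn in METRICS.items():
        edists = [fn(env_rows.iloc[i, 7:19], env_rows.iloc[j, 7:19]) for i, j in pairs]
        out[name] = sc.stats.pearsonr(gdists, edists)
    return out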
| gpl-3.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/neighbors/tests/test_approximate.py | 55 | 19053 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
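# A small numeric cross-check of the cosine distances asserted above
# (illustrative sketch; `_cosine_distance_check` is a hypothetical helper,
# not part of the test suite): d_cos(x, q) = 1 - <x, q> / (||x|| ||q||).
def _cosine_distance_check():
    X = np.array([[0.999, 0.001], [0.5, 0.5], [0., 1.], [-1., 0.001]])
    query = np.array([1., 0.])
    dists = 1 - X.dot(query) / (np.linalg.norm(X, axis=1) * np.linalg.norm(query))
    # dists is approximately [0, 1 - cos(pi/4), 1, 2], matching the
    # assertions in test_radius_neighbors_boundary_handling.
    return dists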
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| mit |
ioam/holoviews | holoviews/tests/plotting/matplotlib/testelementplot.py | 2 | 6569 | import numpy as np
from holoviews.core.spaces import DynamicMap
from holoviews.element import Image, Curve, Scatter, Scatter3D
from holoviews.streams import Stream
from .testplot import TestMPLPlot, mpl_renderer
try:
from matplotlib.ticker import FormatStrFormatter, FuncFormatter, PercentFormatter
except:
pass
class TestElementPlot(TestMPLPlot):
def test_stream_cleanup(self):
stream = Stream.define(str('Test'), test=1)()
dmap = DynamicMap(lambda test: Curve([]), streams=[stream])
plot = mpl_renderer.get_plot(dmap)
self.assertTrue(bool(stream._subscribers))
plot.cleanup()
self.assertFalse(bool(stream._subscribers))
def test_element_xlabel(self):
element = Curve(range(10)).options(xlabel='custom x-label')
axes = mpl_renderer.get_plot(element).handles['axis']
self.assertEqual(axes.get_xlabel(), 'custom x-label')
def test_element_ylabel(self):
element = Curve(range(10)).options(ylabel='custom y-label')
axes = mpl_renderer.get_plot(element).handles['axis']
self.assertEqual(axes.get_ylabel(), 'custom y-label')
def test_element_xformatter_string(self):
curve = Curve(range(10)).options(xformatter='%d')
plot = mpl_renderer.get_plot(curve)
xaxis = plot.handles['axis'].xaxis
xformatter = xaxis.get_major_formatter()
self.assertIsInstance(xformatter, FormatStrFormatter)
self.assertEqual(xformatter.fmt, '%d')
def test_element_yformatter_string(self):
curve = Curve(range(10)).options(yformatter='%d')
plot = mpl_renderer.get_plot(curve)
yaxis = plot.handles['axis'].yaxis
yformatter = yaxis.get_major_formatter()
self.assertIsInstance(yformatter, FormatStrFormatter)
self.assertEqual(yformatter.fmt, '%d')
def test_element_zformatter_string(self):
curve = Scatter3D([]).options(zformatter='%d')
plot = mpl_renderer.get_plot(curve)
zaxis = plot.handles['axis'].zaxis
zformatter = zaxis.get_major_formatter()
self.assertIsInstance(zformatter, FormatStrFormatter)
self.assertEqual(zformatter.fmt, '%d')
def test_element_xformatter_function(self):
def formatter(value):
return str(value) + ' %'
curve = Curve(range(10)).options(xformatter=formatter)
plot = mpl_renderer.get_plot(curve)
xaxis = plot.handles['axis'].xaxis
xformatter = xaxis.get_major_formatter()
self.assertIsInstance(xformatter, FuncFormatter)
def test_element_yformatter_function(self):
def formatter(value):
return str(value) + ' %'
curve = Curve(range(10)).options(yformatter=formatter)
plot = mpl_renderer.get_plot(curve)
yaxis = plot.handles['axis'].yaxis
yformatter = yaxis.get_major_formatter()
self.assertIsInstance(yformatter, FuncFormatter)
def test_element_zformatter_function(self):
def formatter(value):
return str(value) + ' %'
curve = Scatter3D([]).options(zformatter=formatter)
plot = mpl_renderer.get_plot(curve)
zaxis = plot.handles['axis'].zaxis
zformatter = zaxis.get_major_formatter()
self.assertIsInstance(zformatter, FuncFormatter)
def test_element_xformatter_instance(self):
formatter = PercentFormatter()
curve = Curve(range(10)).options(xformatter=formatter)
plot = mpl_renderer.get_plot(curve)
xaxis = plot.handles['axis'].xaxis
xformatter = xaxis.get_major_formatter()
self.assertIs(xformatter, formatter)
def test_element_yformatter_instance(self):
formatter = PercentFormatter()
curve = Curve(range(10)).options(yformatter=formatter)
plot = mpl_renderer.get_plot(curve)
yaxis = plot.handles['axis'].yaxis
yformatter = yaxis.get_major_formatter()
self.assertIs(yformatter, formatter)
def test_element_zformatter_instance(self):
formatter = PercentFormatter()
curve = Scatter3D([]).options(zformatter=formatter)
plot = mpl_renderer.get_plot(curve)
zaxis = plot.handles['axis'].zaxis
zformatter = zaxis.get_major_formatter()
self.assertIs(zformatter, formatter)
class TestColorbarPlot(TestMPLPlot):
def test_colormapper_unsigned_int(self):
img = Image(np.array([[1, 1, 1, 2], [2, 2, 3, 4]]).astype('uint16'))
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (1, 4))
def test_colormapper_symmetric(self):
img = Image(np.array([[0, 1], [2, 3]])).options(symmetric=True)
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (-3, 3))
def test_colormapper_clims(self):
img = Image(np.array([[0, 1], [2, 3]])).options(clims=(0, 4))
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(artist.get_clim(), (0, 4))
def test_colormapper_color_levels(self):
img = Image(np.array([[0, 1], [2, 3]])).options(color_levels=5)
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
self.assertEqual(len(artist.cmap.colors), 5)
def test_colormapper_transparent_nan(self):
img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'NaN': 'transparent'})
plot = mpl_renderer.get_plot(img)
cmap = plot.handles['artist'].cmap
self.assertEqual(cmap._rgba_bad, (1.0, 1.0, 1.0, 0))
def test_colormapper_min_max_colors(self):
img = Image(np.array([[0, 1], [2, 3]])).options(clipping_colors={'min': 'red', 'max': 'blue'})
plot = mpl_renderer.get_plot(img)
cmap = plot.handles['artist'].cmap
self.assertEqual(cmap._rgba_under, (1.0, 0, 0, 1))
self.assertEqual(cmap._rgba_over, (0, 0, 1.0, 1))
def test_colorbar_label(self):
scatter = Scatter(np.random.rand(100, 3), vdims=["y", "c"]).options(color_index=2, colorbar=True)
plot = mpl_renderer.get_plot(scatter)
cbar_ax = plot.handles['cax']
self.assertEqual(cbar_ax.get_ylabel(), 'c')
def test_colorbar_label_style_mapping(self):
scatter = Scatter(np.random.rand(100, 3), vdims=["y", "color"]).options(color='color', colorbar=True)
plot = mpl_renderer.get_plot(scatter)
cbar_ax = plot.handles['cax']
self.assertEqual(cbar_ax.get_ylabel(), 'color')
| bsd-3-clause |
hainm/open-forcefield-group | nmr/ace_x_y_nh2/code/analyze_scalar_couplings.py | 2 | 1531 | import pandas as pd
import mdtraj as md
from ace_x_y_nh2_parameters import *
larger = pd.read_csv("./data/larger_couplings.csv")
smaller = pd.read_csv("./data/smaller_couplings.csv")
reference = []
for aa in amino_acids:
value = smaller.ix["G"][aa]
xyz = ["G%s" % aa, 0, value]
reference.append(xyz)
value = larger.ix["G"][aa]
xyz = ["G%s" % aa, 1, value]
reference.append(xyz)
value = larger.ix[aa]["G"]
xyz = ["%sG" % aa, 0, value]
reference.append(xyz)
value = smaller.ix[aa]["G"]
xyz = ["%sG" % aa, 1, value]
reference.append(xyz)
reference = pd.DataFrame(reference, columns=["seq", "resSeq", "value"])
reference = reference.set_index(["seq", "resSeq"]).value
reference = reference.drop_duplicates()
data = []
for (ff, water, seq) in products:
try:
aa0, aa1 = seq.split("_")[1]
aa_string = "%s%s" % (aa0, aa1)
t = md.load("./dcd/%s_%s_%s.dcd" % (ff, water, seq), top="./pdbs/%s.pdb" % (seq))[1500:]
except:
continue
#phi = md.compute_phi(t)[1] * 180 / np.pi
#J0, J1 = scalar_couplings.J3_HN_HA(phi).mean(0)
J0, J1 = md.compute_J3_HN_HA(t)[1].mean(0)
data.append([ff, water, aa_string, 0, J0])
data.append([ff, water, aa_string, 1, J1])
data = pd.DataFrame(data, columns=["ff", "water", "seq", "resSeq", "value"])
X = data.pivot_table(cols=["seq", "resSeq"], rows=["ff", "water"], values="value")
delta = X - reference
Z = (delta / 0.36)
rms_by_model = (Z ** 2.).mean(1) ** 0.5
rms_by_model
| gpl-2.0 |
kaichogami/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/tsa/vector_ar/var_model.py | 25 | 50516 | """
Vector Autoregression (VAR) processes
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
from __future__ import division, print_function
from statsmodels.compat.python import (range, lrange, string_types, StringIO, iteritems,
cStringIO)
from collections import defaultdict
import numpy as np
import numpy.linalg as npl
from numpy.linalg import cholesky as chol, solve
import scipy.stats as stats
import scipy.linalg as L
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import chain_dot
from statsmodels.tools.linalg import logdet_symm
from statsmodels.tsa.tsatools import vec, unvec
from statsmodels.tsa.vector_ar.irf import IRAnalysis
from statsmodels.tsa.vector_ar.output import VARSummary
import statsmodels.tsa.tsatools as tsa
import statsmodels.tsa.vector_ar.output as output
import statsmodels.tsa.vector_ar.plotting as plotting
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
mat = np.array
#-------------------------------------------------------------------------------
# VAR process routines
def ma_rep(coefs, maxn=10):
r"""
MA(\infty) representation of VAR(p) process
Parameters
----------
coefs : ndarray (p x k x k)
maxn : int
Number of MA matrices to compute
Notes
-----
VAR(p) process as
.. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
can be equivalently represented as
.. math:: y_t = \mu + \sum_{i=0}^\infty \Phi_i u_{t-i}
e.g. can recursively compute the \Phi_i matrices with \Phi_0 = I_k
Returns
-------
phis : ndarray (maxn + 1 x k x k)
"""
p, k, k = coefs.shape
phis = np.zeros((maxn+1, k, k))
phis[0] = np.eye(k)
# recursively compute Phi matrices
for i in range(1, maxn + 1):
for j in range(1, i+1):
if j > p:
break
phis[i] += np.dot(phis[i-j], coefs[j-1])
return phis
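# Minimal worked example for ma_rep (illustrative sketch; `_ma_rep_example`
# and the coefficient values are made up): for a VAR(1), Phi_i = A_1**i.
def _ma_rep_example():
    A1 = np.array([[0.5, 0.1], [0.0, 0.4]])
    phis = ma_rep(A1[None, :, :], maxn=3)
    # phis[0] is the identity, phis[1] is A1, phis[2] is A1 dot A1, ...
    assert np.allclose(phis[2], np.dot(A1, A1))
    return phis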
def is_stable(coefs, verbose=False):
"""
Determine stability of VAR(p) system by examining the eigenvalues of the
VAR(1) representation
Parameters
----------
coefs : ndarray (p x k x k)
Returns
-------
is_stable : bool
"""
A_var1 = util.comp_matrix(coefs)
eigs = np.linalg.eigvals(A_var1)
if verbose:
print('Eigenvalues of VAR(1) rep')
for val in np.abs(eigs):
print(val)
return (np.abs(eigs) <= 1).all()
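# Illustrative stability check (sketch with made-up matrices): a VAR(1) with
# A_1 = 0.5 I has all companion eigenvalues inside the unit circle and is
# stable, while A_1 = 1.1 I is explosive.
def _is_stable_example():
    stable = is_stable(np.array([0.5 * np.eye(2)]))       # True
    explosive = is_stable(np.array([1.1 * np.eye(2)]))    # False
    return stable, explosive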
def var_acf(coefs, sig_u, nlags=None):
"""
Compute autocovariance function ACF_y(h) up to nlags of stable VAR(p)
process
Parameters
----------
coefs : ndarray (p x k x k)
Coefficient matrices A_i
sig_u : ndarray (k x k)
Covariance of white noise process u_t
nlags : int, optional
Defaults to order p of system
Notes
-----
Ref: Lutkepohl p.28-29
Returns
-------
acf : ndarray, (p, k, k)
"""
p, k, _ = coefs.shape
if nlags is None:
nlags = p
# p x k x k, ACF for lags 0, ..., p-1
result = np.zeros((nlags + 1, k, k))
result[:p] = _var_acf(coefs, sig_u)
# yule-walker equations
for h in range(p, nlags + 1):
# compute ACF for lag=h
# G(h) = A_1 G(h-1) + ... + A_p G(h-p)
for j in range(p):
result[h] += np.dot(coefs[j], result[h-j-1])
return result
def _var_acf(coefs, sig_u):
"""
Compute autocovariance function ACF_y(h) for h=1,...,p
Notes
-----
Lutkepohl (2005) p.29
"""
p, k, k2 = coefs.shape
assert(k == k2)
A = util.comp_matrix(coefs)
# construct VAR(1) noise covariance
SigU = np.zeros((k*p, k*p))
SigU[:k,:k] = sig_u
# vec(ACF) = (I_(kp)^2 - kron(A, A))^-1 vec(Sigma_U)
vecACF = L.solve(np.eye((k*p)**2) - np.kron(A, A), vec(SigU))
acf = unvec(vecACF)
acf = acf[:k].T.reshape((p, k, k))
return acf
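# Worked check of the lag-0 autocovariance for a VAR(1) (illustrative sketch;
# numbers are made up): Gamma(0) satisfies the discrete Lyapunov equation
# Gamma(0) = A Gamma(0) A' + Sigma_u, which is the linear system solved above
# via vec(Gamma(0)) = (I - kron(A, A))^{-1} vec(Sigma_u).
def _var_acf_example():
    A = np.array([[0.5, 0.1], [0.0, 0.4]])
    sig_u = np.eye(2)
    gamma0 = _var_acf(A[None, :, :], sig_u)[0]
    assert np.allclose(gamma0, np.dot(np.dot(A, gamma0), A.T) + sig_u)
    return gamma0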
def forecast(y, coefs, intercept, steps):
"""
Produce linear MSE forecast
Parameters
----------
y :
coefs :
intercept :
steps :
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lutkepohl p. 37
Also used by DynamicVAR class
"""
p = len(coefs)
k = len(coefs[0])
# initial value
forcs = np.zeros((steps, k)) + intercept
# h=0 forecast should be latest observation
# forcs[0] = y[-1]
# make indices easier to think about
for h in range(1, steps + 1):
# y_t(h) = intercept + sum_1^p A_i y_t_(h-i)
f = forcs[h - 1]
for i in range(1, p + 1):
# slightly hackish
if h - i <= 0:
# e.g. when h=1, h-1 = 0, which is y[-1]
prior_y = y[h - i - 1]
else:
# e.g. when h=2, h-1=1, which is forcs[0]
prior_y = forcs[h - i - 1]
# i=1 is coefs[0]
f = f + np.dot(coefs[i - 1], prior_y)
forcs[h - 1] = f
return forcs
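# Minimal forecast recursion example (illustrative sketch; values are made
# up): for a VAR(1), y_T(1) = c + A y_T and y_T(2) = c + A y_T(1).
def _forecast_example():
    A = np.array([[0.5, 0.1], [0.0, 0.4]])
    c = np.array([1.0, 0.5])
    y_hist = np.array([[0.2, -0.1]])   # only the last p = 1 observations are needed
    forcs = forecast(y_hist, A[None, :, :], c, steps=2)
    step1 = c + np.dot(A, y_hist[-1])
    assert np.allclose(forcs[0], step1)
    assert np.allclose(forcs[1], c + np.dot(A, step1))
    return forcs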
def forecast_cov(ma_coefs, sig_u, steps):
"""
Compute theoretical forecast error variance matrices
Parameters
----------
Returns
-------
forc_covs : ndarray (steps x neqs x neqs)
"""
k = len(sig_u)
forc_covs = np.zeros((steps, k, k))
prior = np.zeros((k, k))
for h in range(steps):
# Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
phi = ma_coefs[h]
var = chain_dot(phi, sig_u, phi.T)
forc_covs[h] = prior = prior + var
return forc_covs
def var_loglike(resid, omega, nobs):
r"""
Returns the value of the VAR(p) log-likelihood.
Parameters
----------
resid : ndarray (T x K)
omega : ndarray
Sigma hat matrix. Each element i,j is the average product of the
OLS residual for variable i and the OLS residual for variable j or
np.dot(resid.T,resid)/nobs. There should be no correction for the
degrees of freedom.
nobs : int
Returns
-------
llf : float
The value of the loglikelihood function for a VAR(p) model
Notes
-----
The loglikelihood function for the VAR(p) is
.. math::
-\left(\frac{T}{2}\right)
\left(\ln\left|\Omega\right|+K\ln\left(2\pi\right)+K\right)
"""
logdet = logdet_symm(np.asarray(omega))
neqs = len(omega)
part1 = - (nobs * neqs / 2) * np.log(2 * np.pi)
part2 = - (nobs / 2) * (logdet + neqs)
return part1 + part2
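# Worked sketch for var_loglike (illustrative; residuals are simulated):
# with omega the ML estimate resid' resid / T, the concentrated Gaussian
# log-likelihood is -(T / 2) * (K log(2 pi) + log|omega| + K).
def _var_loglike_example():
    rng = np.random.RandomState(0)
    resid = rng.randn(50, 2)
    omega = np.dot(resid.T, resid) / len(resid)
    llf = var_loglike(resid, omega, nobs=len(resid))
    T, K = resid.shape
    direct = -0.5 * T * (K * np.log(2 * np.pi) + np.log(np.linalg.det(omega)) + K)
    assert np.allclose(llf, direct)
    return llf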
def _reordered(self, order):
#Create new arrays to hold rearranged results from .fit()
endog = self.endog
endog_lagged = self.endog_lagged
params = self.params
sigma_u = self.sigma_u
names = self.names
k_ar = self.k_ar
endog_new = np.zeros([np.size(endog,0),np.size(endog,1)])
endog_lagged_new = np.zeros([np.size(endog_lagged,0), np.size(endog_lagged,1)])
params_new_inc, params_new = [np.zeros([np.size(params,0), np.size(params,1)])
for i in range(2)]
sigma_u_new_inc, sigma_u_new = [np.zeros([np.size(sigma_u,0), np.size(sigma_u,1)])
for i in range(2)]
num_end = len(self.params[0])
names_new = []
#Rearrange elements and fill in new arrays
k = self.k_trend
for i, c in enumerate(order):
endog_new[:,i] = self.endog[:,c]
if k > 0:
params_new_inc[0,i] = params[0,i]
endog_lagged_new[:,0] = endog_lagged[:,0]
for j in range(k_ar):
params_new_inc[i+j*num_end+k,:] = self.params[c+j*num_end+k,:]
endog_lagged_new[:,i+j*num_end+k] = endog_lagged[:,c+j*num_end+k]
sigma_u_new_inc[i,:] = sigma_u[c,:]
names_new.append(names[c])
for i, c in enumerate(order):
params_new[:,i] = params_new_inc[:,c]
sigma_u_new[:,i] = sigma_u_new_inc[:,c]
return VARResults(endog=endog_new, endog_lagged=endog_lagged_new,
params=params_new, sigma_u=sigma_u_new,
lag_order=self.k_ar, model=self.model,
trend='c', names=names_new, dates=self.dates)
#-------------------------------------------------------------------------------
# VARProcess class: for known or unknown VAR process
class VAR(tsbase.TimeSeriesModel):
r"""
Fit VAR(p) process and do lag order selection
.. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
Parameters
----------
endog : array-like
2-d endogenous response variable. The independent variable.
dates : array-like
must match number of rows of endog
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
def __init__(self, endog, dates=None, freq=None, missing='none'):
super(VAR, self).__init__(endog, None, dates, freq, missing=missing)
if self.endog.ndim == 1:
raise ValueError("Only gave one variable to VAR")
self.y = self.endog #keep alias for now
self.neqs = self.endog.shape[1]
def _get_predict_start(self, start, k_ar):
if start is None:
start = k_ar
return super(VAR, self)._get_predict_start(start)
def predict(self, params, start=None, end=None, lags=1, trend='c'):
"""
Returns in-sample predictions or forecasts
"""
start = self._get_predict_start(start, lags)
end, out_of_sample = self._get_predict_end(end)
if end < start:
raise ValueError("end is before start")
if end == start + out_of_sample:
return np.array([])
k_trend = util.get_trendorder(trend)
k = self.neqs
k_ar = lags
predictedvalues = np.zeros((end + 1 - start + out_of_sample, k))
if k_trend != 0:
intercept = params[:k_trend]
predictedvalues += intercept
y = self.y
X = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
fittedvalues = np.dot(X, params)
fv_start = start - k_ar
pv_end = min(len(predictedvalues), len(fittedvalues) - fv_start)
fv_end = min(len(fittedvalues), end-k_ar+1)
predictedvalues[:pv_end] = fittedvalues[fv_start:fv_end]
if not out_of_sample:
return predictedvalues
# fit out of sample
y = y[-k_ar:]
coefs = params[k_trend:].reshape((k_ar, k, k)).swapaxes(1,2)
predictedvalues[pv_end:] = forecast(y, coefs, intercept, out_of_sample)
return predictedvalues
def fit(self, maxlags=None, method='ols', ic=None, trend='c',
verbose=False):
"""
Fit the VAR model
Parameters
----------
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
trend : str {"c", "ct", "ctt", "nc"}
"c" - add constant
"ct" - constant and trend
"ctt" - constant, linear and quadratic trend
"nc" - no constant, no trend
Note that these are prepended to the columns of the dataset.
Notes
-----
Lutkepohl pp. 146-153
Returns
-------
est : VARResults
"""
lags = maxlags
if trend not in ['c', 'ct', 'ctt', 'nc']:
raise ValueError("trend '{}' not supported for VAR".format(trend))
if ic is not None:
selections = self.select_order(maxlags=maxlags, verbose=verbose)
if ic not in selections:
raise Exception("%s not recognized, must be among %s"
% (ic, sorted(selections)))
lags = selections[ic]
if verbose:
print('Using %d based on %s criterion' % (lags, ic))
else:
if lags is None:
lags = 1
k_trend = util.get_trendorder(trend)
self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
self.nobs = len(self.endog) - lags
return self._estimate_var(lags, trend=trend)
def _estimate_var(self, lags, offset=0, trend='c'):
"""
lags : int
offset : int
Periods to drop from beginning-- for order selection so it's an
apples-to-apples comparison
trend : string or None
As per above
"""
# have to do this again because select_order doesn't call fit
self.k_trend = k_trend = util.get_trendorder(trend)
if offset < 0: # pragma: no cover
raise ValueError('offset must be >= 0')
y = self.y[offset:]
z = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
y_sample = y[lags:]
# Lutkepohl p75, about 5x faster than stated formula
params = np.linalg.lstsq(z, y_sample)[0]
resid = y_sample - np.dot(z, params)
# Unbiased estimate of covariance matrix $\Sigma_u$ of the white noise
# process $u$
# equivalent definition
# .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
# Z^\prime) Y
# Ref: Lutkepohl p.75
# df_resid right now is T - Kp - 1, which is a suggested correction
avobs = len(y_sample)
df_resid = avobs - (self.neqs * lags + k_trend)
sse = np.dot(resid.T, resid)
omega = sse / df_resid
varfit = VARResults(y, z, params, omega, lags, names=self.endog_names,
trend=trend, dates=self.data.dates, model=self)
return VARResultsWrapper(varfit)
def select_order(self, maxlags=None, verbose=True):
"""
Compute lag order selections based on each of the available information
criteria
Parameters
----------
maxlags : int
if None, defaults to 12 * (nobs/100.)**(1./4)
verbose : bool, default True
If True, print table of info criteria and selected orders
Returns
-------
selections : dict {info_crit -> selected_order}
"""
if maxlags is None:
maxlags = int(round(12*(len(self.endog)/100.)**(1/4.)))
ics = defaultdict(list)
for p in range(maxlags + 1):
# exclude some periods to same amount of data used for each lag
# order
result = self._estimate_var(p, offset=maxlags-p)
for k, v in iteritems(result.info_criteria):
ics[k].append(v)
selected_orders = dict((k, mat(v).argmin())
for k, v in iteritems(ics))
if verbose:
output.print_ic_table(ics, selected_orders)
return selected_orders
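# End-to-end usage sketch (illustrative; `_order_selection_example` and the
# coefficient values are made up): simulate a bivariate VAR(1), pick the lag
# order by information criteria, then fit.
def _order_selection_example():
    coefs = np.array([[[0.5, 0.1], [0.0, 0.4]]])
    data = util.varsim(coefs, np.zeros(2), np.eye(2), steps=500)
    model = VAR(data)
    selections = model.select_order(maxlags=6, verbose=False)
    results = model.fit(maxlags=6, ic='aic', verbose=False)
    return selections, results.k_ar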
class VARProcess(object):
"""
Class represents a known VAR(p) process
Parameters
----------
coefs : ndarray (p x k x k)
intercept : ndarray (length k)
sigma_u : ndarray (k x k)
names : sequence (length k)
Returns
-------
**Attributes**:
"""
def __init__(self, coefs, intercept, sigma_u, names=None):
self.k_ar = len(coefs)
self.neqs = coefs.shape[1]
self.coefs = coefs
self.intercept = intercept
self.sigma_u = sigma_u
self.names = names
def get_eq_index(self, name):
"Return integer position of requested equation name"
return util.get_index(self.names, name)
def __str__(self):
output = ('VAR(%d) process for %d-dimensional response y_t'
% (self.k_ar, self.neqs))
output += '\nstable: %s' % self.is_stable()
output += '\nmean: %s' % self.mean()
return output
def is_stable(self, verbose=False):
"""Determine stability based on model coefficients
Parameters
----------
verbose : bool
Print eigenvalues of the VAR(1) companion
Notes
-----
Checks if det(I - Az) = 0 for any mod(z) <= 1, so all the eigenvalues of
the companion matrix must lie outside the unit circle
"""
return is_stable(self.coefs, verbose=verbose)
def plotsim(self, steps=1000):
"""
Plot a simulation from the VAR(p) process for the desired number of
steps
"""
Y = util.varsim(self.coefs, self.intercept, self.sigma_u, steps=steps)
plotting.plot_mts(Y)
def mean(self):
r"""Mean of stable process
Lutkepohl eq. 2.1.23
.. math:: \mu = (I - A_1 - \dots - A_p)^{-1} \alpha
"""
return solve(self._char_mat, self.intercept)
def ma_rep(self, maxn=10):
r"""Compute MA(:math:`\infty`) coefficient matrices
Parameters
----------
maxn : int
Number of coefficient matrices to compute
Returns
-------
coefs : ndarray (maxn x k x k)
"""
return ma_rep(self.coefs, maxn=maxn)
def orth_ma_rep(self, maxn=10, P=None):
r"""Compute Orthogonalized MA coefficient matrices using P matrix such
that :math:`\Sigma_u = PP^\prime`. P defaults to the Cholesky
decomposition of :math:`\Sigma_u`
Parameters
----------
maxn : int
Number of coefficient matrices to compute
P : ndarray (k x k), optional
Matrix such that Sigma_u = PP', defaults to Cholesky decomposition
Returns
-------
coefs : ndarray (maxn x k x k)
"""
if P is None:
P = self._chol_sigma_u
ma_mats = self.ma_rep(maxn=maxn)
return mat([np.dot(coefs, P) for coefs in ma_mats])
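# Illustrative helper (hypothetical, not part of the public API): because
# Phi_0 = I_k, the first orthogonalized coefficient matrix Theta_0 = Phi_0 P
# equals the Cholesky factor P of Sigma_u.
def _orth_ma_rep_sketch(self, maxn=3):
    thetas = self.orth_ma_rep(maxn=maxn)
    assert np.allclose(thetas[0], self._chol_sigma_u)
    return thetas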
def long_run_effects(self):
"""Compute long-run effect of unit impulse
.. math::
\Psi_\infty = \sum_{i=0}^\infty \Phi_i
"""
return L.inv(self._char_mat)
@cache_readonly
def _chol_sigma_u(self):
return chol(self.sigma_u)
@cache_readonly
def _char_mat(self):
return np.eye(self.neqs) - self.coefs.sum(0)
def acf(self, nlags=None):
"""Compute theoretical autocovariance function
Returns
-------
acf : ndarray (p x k x k)
"""
return var_acf(self.coefs, self.sigma_u, nlags=nlags)
def acorr(self, nlags=None):
"""Compute theoretical autocorrelation function
Returns
-------
acorr : ndarray (p x k x k)
"""
return util.acf_to_acorr(self.acf(nlags=nlags))
def plot_acorr(self, nlags=10, linewidth=8):
"Plot theoretical autocorrelation function"
plotting.plot_full_acorr(self.acorr(nlags=nlags), linewidth=linewidth)
def forecast(self, y, steps):
"""Produce linear minimum MSE forecasts for desired number of steps
ahead, using prior values y
Parameters
----------
y : ndarray (p x k)
steps : int
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lutkepohl pp 37-38
"""
return forecast(y, self.coefs, self.intercept, steps)
def mse(self, steps):
"""
Compute theoretical forecast error variance matrices
Parameters
----------
steps : int
Number of steps ahead
Notes
-----
.. math:: \mathrm{MSE}(h) = \sum_{i=0}^{h-1} \Phi_i \Sigma_u \Phi_i^\prime
Returns
-------
forc_covs : ndarray (steps x neqs x neqs)
"""
ma_coefs = self.ma_rep(steps)
k = len(self.sigma_u)
forc_covs = np.zeros((steps, k, k))
prior = np.zeros((k, k))
for h in range(steps):
# Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
phi = ma_coefs[h]
var = chain_dot(phi, self.sigma_u, phi.T)
forc_covs[h] = prior = prior + var
return forc_covs
forecast_cov = mse
def _forecast_vars(self, steps):
covs = self.forecast_cov(steps)
# Take diagonal for each cov
inds = np.arange(self.neqs)
return covs[:, inds, inds]
def forecast_interval(self, y, steps, alpha=0.05):
"""Construct forecast interval estimates assuming the y are Gaussian
Parameters
----------
Notes
-----
Lutkepohl pp. 39-40
Returns
-------
(mid, lower, upper) : (ndarray, ndarray, ndarray)
"""
assert(0 < alpha < 1)
q = util.norm_signif_level(alpha)
point_forecast = self.forecast(y, steps)
sigma = np.sqrt(self._forecast_vars(steps))
forc_lower = point_forecast - q * sigma
forc_upper = point_forecast + q * sigma
return point_forecast, forc_lower, forc_upper
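# Illustrative helper (hypothetical, not part of the public API): the
# interval is the point forecast plus or minus the normal critical value
# times the forecast standard deviations.
def _forecast_interval_sketch(self, y, steps=5, alpha=0.05):
    mid, lower, upper = self.forecast_interval(y, steps, alpha=alpha)
    q = util.norm_signif_level(alpha)
    assert np.allclose(upper - mid, q * np.sqrt(self._forecast_vars(steps)))
    return lower, mid, upper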
#-------------------------------------------------------------------------------
# VARResults class
class VARResults(VARProcess):
"""Estimate VAR(p) process with fixed number of lags
Parameters
----------
endog : array
endog_lagged : array
params : array
sigma_u : array
lag_order : int
model : VAR model instance
trend : str {'nc', 'c', 'ct'}
names : array-like
List of names of the endogenous variables in order of appearance in `endog`.
dates
Returns
-------
**Attributes**
aic
bic
bse
coefs : ndarray (p x K x K)
Estimated A_i matrices, A_i = coefs[i-1]
cov_params
dates
detomega
df_model : int
df_resid : int
endog
endog_lagged
fittedvalues
fpe
intercept
info_criteria
k_ar : int
k_trend : int
llf
model
names
neqs : int
Number of variables (equations)
nobs : int
n_totobs : int
params
k_ar : int
Order of VAR process
params : ndarray (Kp + 1) x K
A_i matrices and intercept in stacked form [int A_1 ... A_p]
pvalues
names : list
variables names
resid
roots : array
The roots of the VAR process are the solution to
(I - coefs[0]*z - coefs[1]*z**2 ... - coefs[p-1]*z**k_ar) = 0.
Note that the inverse roots are returned, and stability requires that
the roots lie outside the unit circle.
sigma_u : ndarray (K x K)
Estimate of white noise process variance Var[u_t]
sigma_u_mle
stderr
trendorder
tvalues
y :
ys_lagged
"""
_model_type = 'VAR'
def __init__(self, endog, endog_lagged, params, sigma_u, lag_order,
model=None, trend='c', names=None, dates=None):
self.model = model
self.y = self.endog = endog #keep alias for now
self.ys_lagged = self.endog_lagged = endog_lagged #keep alias for now
self.dates = dates
self.n_totobs, neqs = self.y.shape
self.nobs = self.n_totobs - lag_order
k_trend = util.get_trendorder(trend)
if k_trend > 0: # make this the polynomial trend order
trendorder = k_trend - 1
else:
trendorder = None
self.k_trend = k_trend
self.trendorder = trendorder
self.exog_names = util.make_lag_names(names, lag_order, k_trend)
self.params = params
# Initialize VARProcess parent class
# construct coefficient matrices
# Each matrix needs to be transposed
reshaped = self.params[self.k_trend:]
reshaped = reshaped.reshape((lag_order, neqs, neqs))
# Need to transpose each coefficient matrix
intercept = self.params[0]
coefs = reshaped.swapaxes(1, 2).copy()
super(VARResults, self).__init__(coefs, intercept, sigma_u, names=names)
def plot(self):
"""Plot input time series
"""
plotting.plot_mts(self.y, names=self.names, index=self.dates)
@property
def df_model(self):
"""Number of estimated parameters, including the intercept / trends
"""
return self.neqs * self.k_ar + self.k_trend
@property
def df_resid(self):
"Number of observations minus number of estimated parameters"
return self.nobs - self.df_model
@cache_readonly
def fittedvalues(self):
"""The predicted insample values of the response variables of the model.
"""
return np.dot(self.ys_lagged, self.params)
@cache_readonly
def resid(self):
"""Residuals of response variable resulting from estimated coefficients
"""
return self.y[self.k_ar:] - self.fittedvalues
def sample_acov(self, nlags=1):
return _compute_acov(self.y[self.k_ar:], nlags=nlags)
def sample_acorr(self, nlags=1):
acovs = self.sample_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
def plot_sample_acorr(self, nlags=10, linewidth=8):
"Plot theoretical autocorrelation function"
plotting.plot_full_acorr(self.sample_acorr(nlags=nlags),
linewidth=linewidth)
def resid_acov(self, nlags=1):
"""
Compute centered sample autocovariance (including lag 0)
Parameters
----------
nlags : int
Returns
-------
"""
return _compute_acov(self.resid, nlags=nlags)
def resid_acorr(self, nlags=1):
"""
Compute sample autocorrelation (including lag 0)
Parameters
----------
nlags : int
Returns
-------
"""
acovs = self.resid_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
@cache_readonly
def resid_corr(self):
"Centered residual correlation matrix"
return self.resid_acorr(0)[0]
@cache_readonly
def sigma_u_mle(self):
"""(Biased) maximum likelihood estimate of noise process covariance
"""
return self.sigma_u * self.df_resid / self.nobs
@cache_readonly
def cov_params(self):
"""Estimated variance-covariance of model coefficients
Notes
-----
Covariance of vec(B), where B is the matrix
[intercept, A_1, ..., A_p] (K x (Kp + 1))
Adjusted to be an unbiased estimator
Ref: Lutkepohl p.74-75
"""
z = self.ys_lagged
return np.kron(L.inv(np.dot(z.T, z)), self.sigma_u)
def cov_ybar(self):
r"""Asymptotically consistent estimate of covariance of the sample mean
.. math::
\sqrt{T} (\bar{y} - \mu) \rightarrow {\cal N}(0, \Sigma_{\bar{y}})\\
\Sigma_{\bar{y}} = B \Sigma_u B^\prime, \text{where } B = (I_K - A_1
- \cdots - A_p)^{-1}
Notes
-----
Lutkepohl Proposition 3.3
"""
Ainv = L.inv(np.eye(self.neqs) - self.coefs.sum(0))
return chain_dot(Ainv, self.sigma_u, Ainv.T)
#------------------------------------------------------------
# Estimation-related things
@cache_readonly
def _zz(self):
# Z'Z
return np.dot(self.ys_lagged.T, self.ys_lagged)
@property
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients ex intercept
"""
# drop intercept and trend
return self.cov_params[self.k_trend*self.neqs:, self.k_trend*self.neqs:]
@cache_readonly
def _cov_sigma(self):
"""
Estimated covariance matrix of vech(sigma_u)
"""
D_K = tsa.duplication_matrix(self.neqs)
D_Kinv = npl.pinv(D_K)
sigxsig = np.kron(self.sigma_u, self.sigma_u)
return 2 * chain_dot(D_Kinv, sigxsig, D_Kinv.T)
@cache_readonly
def llf(self):
"Compute VAR(p) loglikelihood"
return var_loglike(self.resid, self.sigma_u_mle, self.nobs)
@cache_readonly
def stderr(self):
"""Standard errors of coefficients, reshaped to match in size
"""
stderr = np.sqrt(np.diag(self.cov_params))
return stderr.reshape((self.df_model, self.neqs), order='C')
bse = stderr # statsmodels interface?
@cache_readonly
def tvalues(self):
"""Compute t-statistics. Use Student-t(T - Kp - 1) = t(df_resid) to test
significance.
"""
return self.params / self.stderr
@cache_readonly
def pvalues(self):
"""Two-sided p-values for model coefficients from Student t-distribution
"""
return stats.t.sf(np.abs(self.tvalues), self.df_resid)*2
def plot_forecast(self, steps, alpha=0.05, plot_stderr=True):
"""
Plot forecast
"""
mid, lower, upper = self.forecast_interval(self.y[-self.k_ar:], steps,
alpha=alpha)
plotting.plot_var_forc(self.y, mid, lower, upper, names=self.names,
plot_stderr=plot_stderr)
# Forecast error covariance functions
def forecast_cov(self, steps=1):
r"""Compute forecast covariance matrices for desired number of steps
Parameters
----------
steps : int
Notes
-----
.. math:: \Sigma_{\hat y}(h) = \Sigma_y(h) + \Omega(h) / T
Ref: Lutkepohl pp. 96-97
Returns
-------
covs : ndarray (steps x k x k)
"""
mse = self.mse(steps)
omegas = self._omega_forc_cov(steps)
return mse + omegas / self.nobs
#Monte Carlo irf standard errors
def irf_errband_mc(self, orth=False, repl=1000, T=10,
signif=0.05, seed=None, burn=100, cum=False):
"""
Compute Monte Carlo integrated error bands assuming normally
distributed for impulse response functions
Parameters
----------
orth: bool, default False
Compute orthogonalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
signif: float (0 < signif <1)
Significance level for error bars, defaults to 95% CI
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
Lutkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
nobs = self.nobs
ma_coll = np.zeros((repl, T+1, neqs, neqs))
if (orth == True and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T).cumsum(axis=0)
elif (orth == True and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T)
elif (orth == False and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T).cumsum(axis=0)
elif (orth == False and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T)
for i in range(repl):
#discard the first `burn` observations to correct for starting-value bias
sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
sim = sim[burn:]
ma_coll[i,:,:,:] = fill_coll(sim)
ma_sort = np.sort(ma_coll, axis=0) #sort to get quantiles
index = round(signif/2*repl)-1,round((1-signif/2)*repl)-1
lower = ma_sort[index[0],:, :, :]
upper = ma_sort[index[1],:, :, :]
return lower, upper
def irf_resim(self, orth=False, repl=1000, T=10,
seed=None, burn=100, cum=False):
"""
Simulates impulse response function, returning an array of simulations.
Used for Sims-Zha error band calculation.
Parameters
----------
orth: bool, default False
Compute orthogonalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
Sims, Christoper A., and Tao Zha. 1999. "Error Bands for Impulse Response." Econometrica 67: 1113-1155.
Returns
-------
Array of simulated impulse response functions
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
nobs = self.nobs
if seed is not None:
np.random.seed(seed=seed)
ma_coll = np.zeros((repl, T+1, neqs, neqs))
if (orth == True and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T).cumsum(axis=0)
elif (orth == True and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
orth_ma_rep(maxn=T)
elif (orth == False and cum == True):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T).cumsum(axis=0)
elif (orth == False and cum == False):
fill_coll = lambda sim : VAR(sim).fit(maxlags=k_ar).\
ma_rep(maxn=T)
for i in range(repl):
#discard the first `burn` observations to correct for starting-value bias
sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
sim = sim[burn:]
ma_coll[i,:,:,:] = fill_coll(sim)
return ma_coll
def _omega_forc_cov(self, steps):
# Approximate MSE matrix \Omega(h) as defined in Lut p97
G = self._zz
Ginv = L.inv(G)
# memoize powers of B for speedup
# TODO: see if can memoize better
B = self._bmat_forc_cov()
_B = {}
def bpow(i):
if i not in _B:
_B[i] = np.linalg.matrix_power(B, i)
return _B[i]
phis = self.ma_rep(steps)
sig_u = self.sigma_u
omegas = np.zeros((steps, self.neqs, self.neqs))
for h in range(1, steps + 1):
if h == 1:
omegas[h-1] = self.df_model * self.sigma_u
continue
om = omegas[h-1]
for i in range(h):
for j in range(h):
Bi = bpow(h - 1 - i)
Bj = bpow(h - 1 - j)
mult = np.trace(chain_dot(Bi.T, Ginv, Bj, G))
om += mult * chain_dot(phis[i], sig_u, phis[j].T)
omegas[h-1] = om
return omegas
def _bmat_forc_cov(self):
# B as defined on p. 96 of Lut
upper = np.zeros((1, self.df_model))
upper[0,0] = 1
lower_dim = self.neqs * (self.k_ar - 1)
I = np.eye(lower_dim)
lower = np.column_stack((np.zeros((lower_dim, 1)), I,
np.zeros((lower_dim, self.neqs))))
return np.vstack((upper, self.params.T, lower))
def summary(self):
"""Compute console output summary of estimates
Returns
-------
summary : VARSummary
"""
return VARSummary(self)
def irf(self, periods=10, var_decomp=None, var_order=None):
"""Analyze impulse responses to shocks in system
Parameters
----------
periods : int
var_decomp : ndarray (k x k), lower triangular
Must satisfy Omega = P P', where P is the passed matrix. Defaults to
Cholesky decomposition of Omega
var_order : sequence
Alternate variable order for Cholesky decomposition
Returns
-------
irf : IRAnalysis
"""
if var_order is not None:
raise NotImplementedError('alternate variable order not implemented'
' (yet)')
return IRAnalysis(self, P=var_decomp, periods=periods)
def fevd(self, periods=10, var_decomp=None):
"""
Compute forecast error variance decomposition ("fevd")
Returns
-------
fevd : FEVD instance
"""
return FEVD(self, P=var_decomp, periods=periods)
def reorder(self, order):
"""Reorder variables for structural specification
"""
if len(order) != len(self.params[0,:]):
raise ValueError("Reorder specification length should match number of endogenous variables")
#This converts order to a list of integers if given as strings
if isinstance(order[0], string_types):
order_new = []
for i, nam in enumerate(order):
order_new.append(self.names.index(order[i]))
order = order_new
return _reordered(self, order)
#-------------------------------------------------------------------------------
# VAR Diagnostics: Granger-causality, whiteness of residuals, normality, etc.
def test_causality(self, equation, variables, kind='f', signif=0.05,
verbose=True):
"""Compute the test statistic for the null hypothesis of Granger-noncausality;
a general function to test joint Granger-causality of multiple variables
Parameters
----------
equation : string or int
Equation to test for causality
variables : sequence (of strings or ints)
List, tuple, etc. of variables to test for Granger-causality
kind : {'f', 'wald'}
Perform F-test or Wald (chi-sq) test
signif : float, default 5%
Significance level for computing critical values for the test,
defaulting to the standard 0.05 (i.e. 95% confidence) level
Notes
-----
Null hypothesis is that there is no Granger-causality for the indicated
variables. The degrees of freedom in the F-test are based on the
number of variables in the VAR system, that is, degrees of freedom
are equal to the number of equations in the VAR times the degrees of freedom
of a single equation.
Returns
-------
results : dict
"""
if isinstance(variables, (string_types, int, np.integer)):
variables = [variables]
k, p = self.neqs, self.k_ar
# number of restrictions
N = len(variables) * self.k_ar
# Make restriction matrix
C = np.zeros((N, k ** 2 * p + k), dtype=float)
eq_index = self.get_eq_index(equation)
vinds = mat([self.get_eq_index(v) for v in variables])
# remember, vec is column order!
offsets = np.concatenate([k + k ** 2 * j + k * vinds + eq_index
for j in range(p)])
C[np.arange(N), offsets] = 1
# Lutkepohl 3.6.5
Cb = np.dot(C, vec(self.params.T))
middle = L.inv(chain_dot(C, self.cov_params, C.T))
# wald statistic
lam_wald = statistic = chain_dot(Cb, middle, Cb)
if kind.lower() == 'wald':
df = N
dist = stats.chi2(df)
elif kind.lower() == 'f':
statistic = lam_wald / N
df = (N, k * self.df_resid)
dist = stats.f(*df)
else:
raise Exception('kind %s not recognized' % kind)
pvalue = dist.sf(statistic)
crit_value = dist.ppf(1 - signif)
conclusion = 'fail to reject' if statistic < crit_value else 'reject'
results = {
'statistic' : statistic,
'crit_value' : crit_value,
'pvalue' : pvalue,
'df' : df,
'conclusion' : conclusion,
'signif' : signif
}
if verbose:
summ = output.causality_summary(results, variables, equation, kind)
print(summ)
return results
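# --- Illustrative usage sketch (not part of the original source) ---
# With a fitted VARResults instance `res` whose variables include 'y1' and
# 'y2' (hypothetical names), a Granger-causality test could be run as:
#
#   out = res.test_causality('y1', ['y2'], kind='f', verbose=False)
#   out['statistic'], out['pvalue'], out['conclusion']
#
# 'reject' indicates evidence that 'y2' Granger-causes the 'y1' equation at
# the chosen significance level; 'fail to reject' indicates no such evidence.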
def test_whiteness(self, nlags=10, plot=True, linewidth=8):
"""
Test white noise assumption. Sample (Y) autocorrelations are compared
with the standard :math:`2 / \sqrt{T}` bounds.
Parameters
----------
plot : boolean, default True
Plot autocorrelations with 2 / sqrt(T) bounds
"""
acorrs = self.sample_acorr(nlags)
bound = 2 / np.sqrt(self.nobs)
# TODO: this probably needs some UI work
if (np.abs(acorrs) > bound).any():
print('FAIL: Some autocorrelations exceed %.4f bound. '
'See plot' % bound)
else:
print('PASS: No autocorrelations exceed %.4f bound' % bound)
if plot:
fig = plotting.plot_full_acorr(acorrs[1:],
xlabel=np.arange(1, nlags+1),
err_bound=bound,
linewidth=linewidth)
fig.suptitle(r"ACF plots with $2 / \sqrt{T}$ bounds "
"for testing whiteness assumption")
def test_normality(self, signif=0.05, verbose=True):
"""
Test assumption of normally distributed errors using Jarque-Bera-style
omnibus Chi^2 test
Parameters
----------
signif : float
Test significance threshold
Notes
-----
H0 (null) : data are generated by a Gaussian-distributed process
"""
Pinv = npl.inv(self._chol_sigma_u)
w = np.array([np.dot(Pinv, u) for u in self.resid])
b1 = (w ** 3).sum(0) / self.nobs
lam_skew = self.nobs * np.dot(b1, b1) / 6
b2 = (w ** 4).sum(0) / self.nobs - 3
lam_kurt = self.nobs * np.dot(b2, b2) / 24
lam_omni = lam_skew + lam_kurt
omni_dist = stats.chi2(self.neqs * 2)
omni_pvalue = omni_dist.sf(lam_omni)
crit_omni = omni_dist.ppf(1 - signif)
conclusion = 'fail to reject' if lam_omni < crit_omni else 'reject'
results = {
'statistic' : lam_omni,
'crit_value' : crit_omni,
'pvalue' : omni_pvalue,
'df' : self.neqs * 2,
'conclusion' : conclusion,
'signif' : signif
}
if verbose:
summ = output.normality_summary(results)
print(summ)
return results
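# --- Illustrative note (not part of the original source) ---
# Restating the statistic above in formula form: with orthogonalized
# residuals w = P^{-1} u, and b1, b2 the per-equation skewness and excess
# kurtosis of w,
#   lambda_skew = T * b1'b1 / 6,  lambda_kurt = T * b2'b2 / 24,
#   lambda_omni = lambda_skew + lambda_kurt  ~  chi2(2 * neqs) under H0.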
@cache_readonly
def detomega(self):
r"""
Return determinant of white noise covariance with degrees of freedom
correction:
.. math::
\hat \Omega = \frac{T}{T - Kp - 1} \hat \Omega_{\mathrm{MLE}}
"""
return L.det(self.sigma_u)
@cache_readonly
def info_criteria(self):
"information criteria for lagorder selection"
nobs = self.nobs
neqs = self.neqs
lag_order = self.k_ar
free_params = lag_order * neqs ** 2 + neqs * self.k_trend
ld = logdet_symm(self.sigma_u_mle)
# See Lutkepohl pp. 146-150
aic = ld + (2. / nobs) * free_params
bic = ld + (np.log(nobs) / nobs) * free_params
hqic = ld + (2. * np.log(np.log(nobs)) / nobs) * free_params
fpe = ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)
return {
'aic' : aic,
'bic' : bic,
'hqic' : hqic,
'fpe' : fpe
}
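# --- Illustrative note (not part of the original source) ---
# In formula form, with ld = log|Sigma_u_mle| and K = free_params:
#   AIC  = ld + 2*K/T
#   BIC  = ld + log(T)*K/T
#   HQIC = ld + 2*log(log(T))*K/T
#   FPE  = ((T + df_model)/df_resid)**neqs * exp(ld)
# which matches the expressions computed above (Lutkepohl pp. 146-150).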
@property
def aic(self):
"Akaike information criterion"
return self.info_criteria['aic']
@property
def fpe(self):
"""Final Prediction Error (FPE)
Lutkepohl p. 147, see info_criteria
"""
return self.info_criteria['fpe']
@property
def hqic(self):
"Hannan-Quinn criterion"
return self.info_criteria['hqic']
@property
def bic(self):
"Bayesian a.k.a. Schwarz info criterion"
return self.info_criteria['bic']
@cache_readonly
def roots(self):
neqs = self.neqs
k_ar = self.k_ar
p = neqs * k_ar
arr = np.zeros((p,p))
arr[:neqs,:] = np.column_stack(self.coefs)
arr[neqs:,:-neqs] = np.eye(p-neqs)
roots = np.linalg.eig(arr)[0]**-1
idx = np.argsort(np.abs(roots))[::-1] # sort by reverse modulus
return roots[idx]
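# --- Illustrative note (not part of the original source) ---
# The values above are inverse eigenvalues of the VAR companion matrix, so
# the estimated process is stable (stationary) when every root lies outside
# the unit circle, e.g.:
#
#   stable = np.all(np.abs(res.roots) > 1)
#
# where `res` is a fitted VARResults instance (hypothetical name).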
class VARResultsWrapper(wrap.ResultsWrapper):
_attrs = {'bse' : 'columns_eq', 'cov_params' : 'cov',
'params' : 'columns_eq', 'pvalues' : 'columns_eq',
'tvalues' : 'columns_eq', 'sigma_u' : 'cov_eq',
'sigma_u_mle' : 'cov_eq',
'stderr' : 'columns_eq'}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
_wrap_methods.pop('cov_params') # not yet a method in VARResults
wrap.populate_wrapper(VARResultsWrapper, VARResults)
class FEVD(object):
"""
Compute and plot Forecast error variance decomposition and asymptotic
standard errors
"""
def __init__(self, model, P=None, periods=None):
self.periods = periods
self.model = model
self.neqs = model.neqs
self.names = model.model.endog_names
self.irfobj = model.irf(var_decomp=P, periods=periods)
self.orth_irfs = self.irfobj.orth_irfs
# cumulative impulse responses
irfs = (self.orth_irfs[:periods] ** 2).cumsum(axis=0)
rng = lrange(self.neqs)
mse = self.model.mse(periods)[:, rng, rng]
# lag x equation x component
fevd = np.empty_like(irfs)
for i in range(periods):
fevd[i] = (irfs[i].T / mse[i]).T
# switch to equation x lag x component
self.decomp = fevd.swapaxes(0, 1)
def summary(self):
buf = StringIO()
rng = lrange(self.periods)
for i in range(self.neqs):
ppm = output.pprint_matrix(self.decomp[i], rng, self.names)
buf.write('FEVD for %s\n' % self.names[i])
buf.write(ppm + '\n')
print(buf.getvalue())
def cov(self):
"""Compute asymptotic standard errors
Returns
-------
"""
raise NotImplementedError
def plot(self, periods=None, figsize=(10,10), **plot_kwds):
"""Plot graphical display of FEVD
Parameters
----------
periods : int, default None
Defaults to number originally specified. Can be at most that number
"""
import matplotlib.pyplot as plt
k = self.neqs
periods = periods or self.periods
fig, axes = plt.subplots(nrows=k, figsize=figsize)
fig.suptitle('Forecast error variance decomposition (FEVD)')
colors = [str(c) for c in np.arange(k, dtype=float) / k]
ticks = np.arange(periods)
limits = self.decomp.cumsum(2)
for i in range(k):
ax = axes[i]
this_limits = limits[i].T
handles = []
for j in range(k):
lower = this_limits[j - 1] if j > 0 else 0
upper = this_limits[j]
handle = ax.bar(ticks, upper - lower, bottom=lower,
color=colors[j], label=self.names[j],
**plot_kwds)
handles.append(handle)
ax.set_title(self.names[i])
# just use the last axis to get handles for plotting
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right')
plotting.adjust_subplots(right=0.85)
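# --- Illustrative usage sketch (not part of the original source) ---
# Assuming `res` is a fitted VARResults instance (hypothetical name):
#
#   fevd = res.fevd(periods=10)
#   fevd.summary()   # one decomposition table per equation
#   fevd.plot()      # stacked bars of variance shares per forecast period
#
# Each row of fevd.decomp[i] holds the shares of equation i's forecast error
# variance attributed to each orthogonalized shock, summing to roughly one.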
#-------------------------------------------------------------------------------
def _compute_acov(x, nlags=1):
x = x - x.mean(0)
result = []
for lag in range(nlags + 1):
if lag > 0:
r = np.dot(x[lag:].T, x[:-lag])
else:
r = np.dot(x.T, x)
result.append(r)
return np.array(result) / len(x)
def _acovs_to_acorrs(acovs):
sd = np.sqrt(np.diag(acovs[0]))
return acovs / np.outer(sd, sd)
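# --- Illustrative sketch (not part of the original source) ---
# The two helpers above compose; given a residual array `resid`
# (hypothetical name), sample autocorrelation matrices up to lag 10 are
#
#   acorrs = _acovs_to_acorrs(_compute_acov(resid, nlags=10))
#
# where acorrs[0] is the lag-zero correlation matrix (unit diagonal).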
if __name__ == '__main__':
import statsmodels.api as sm
from statsmodels.tsa.vector_ar.util import parse_lutkepohl_data
import statsmodels.tools.data as data_util
np.set_printoptions(linewidth=140, precision=5)
sdata, dates = parse_lutkepohl_data('data/%s.dat' % 'e1')
names = sdata.dtype.names
data = data_util.struct_to_ndarray(sdata)
adj_data = np.diff(np.log(data), axis=0)
# est = VAR(adj_data, p=2, dates=dates[1:], names=names)
model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
# model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
est = model.fit(maxlags=2)
irf = est.irf()
y = est.y[-2:]
"""
# irf.plot_irf()
# i = 2; j = 1
# cv = irf.cum_effect_cov(orth=True)
# print np.sqrt(cv[:, j * 3 + i, j * 3 + i]) / 1e-2
# data = np.genfromtxt('Canada.csv', delimiter=',', names=True)
# data = data.view((float, 4))
"""
'''
mdata = sm.datasets.macrodata.load().data
mdata2 = mdata[['realgdp','realcons','realinv']]
names = mdata2.dtype.names
data = mdata2.view((float,3))
data = np.diff(np.log(data), axis=0)
import pandas as pn
df = pn.DataFrame.fromRecords(mdata)
df = np.log(df.reindex(columns=names))
df = (df - df.shift(1)).dropna()
model = VAR(df)
est = model.fit(maxlags=2)
irf = est.irf()
'''
| bsd-3-clause |
BlueBrain/NeuroM | examples/end_to_end_distance.py | 1 | 4398 | #!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Calculate and plot end-to-end distance of neurites."""
import neurom as nm
from neurom import morphmath
import numpy as np
import matplotlib.pyplot as plt
def path_end_to_end_distance(neurite):
"""Calculate and return end-to-end-distance of a given neurite."""
trunk = neurite.root_node.points[0]
return max(morphmath.point_dist(l.points[-1], trunk)
for l in neurite.root_node.ileaf())
def mean_end_to_end_dist(neurites):
"""Calculate mean end to end distance for set of neurites."""
return np.mean([path_end_to_end_distance(n) for n in neurites])
def make_end_to_end_distance_plot(nb_segments, end_to_end_distance, neurite_type):
"""Plot end-to-end distance vs number of segments."""
plt.figure()
plt.plot(nb_segments, end_to_end_distance)
plt.title(neurite_type)
plt.xlabel('Number of segments')
plt.ylabel('End-to-end distance')
plt.show()
def calculate_and_plot_end_to_end_distance(neurite):
"""Calculate and plot the end-to-end distance vs the number of segments for
an increasingly larger part of a given neurite.
Note that the plots are not very meaningful for bifurcating trees."""
def _dist(seg):
"""Distance between segmenr end and trunk."""
return morphmath.point_dist(seg[1], neurite.root_node.points[0])
end_to_end_distance = [_dist(s) for s in nm.iter_segments(neurite)]
make_end_to_end_distance_plot(np.arange(len(end_to_end_distance)) + 1,
end_to_end_distance, neurite.type)
if __name__ == '__main__':
# load a neuron from an SWC file
filename = 'tests/data/swc/Neuron_3_random_walker_branches.swc'
nrn = nm.load_neuron(filename)
# print mean end-to-end distance per neurite type
print('Mean end-to-end distance for axons: ',
mean_end_to_end_dist(n for n in nrn.neurites if n.type == nm.AXON))
print('Mean end-to-end distance for basal dendrites: ',
mean_end_to_end_dist(n for n in nrn.neurites if n.type == nm.BASAL_DENDRITE))
print('Mean end-to-end distance for apical dendrites: ',
mean_end_to_end_dist(n for n in nrn.neurites
if n.type == nm.APICAL_DENDRITE))
print('End-to-end distance per neurite (nb segments, end-to-end distance, neurite type):')
for nrte in nrn.neurites:
# plot end-to-end distance for increasingly larger parts of neurite
calculate_and_plot_end_to_end_distance(nrte)
# print (number of segments, end-to-end distance, neurite type)
print(sum(len(s.points) - 1 for s in nrte.root_node.ipreorder()),
path_end_to_end_distance(nrte), nrte.type)
| bsd-3-clause |
harterj/moose | modules/porous_flow/doc/content/modules/porous_flow/tests/sinks/sinks.py | 9 | 10282 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected_s01(t):
bulk = 1.3
dens0 = 1.1
rate = -6
por = 0.1
area = 0.5
vol = 0.25
p0 = 1
initial_mass = vol * por * dens0 * np.exp(p0 / bulk)
return initial_mass + rate * area * t
def expected_s02(t):
# rho = rho0*exp(rate*area*perm*t/visc/vol/por)
bulk = 1.3
dens0 = 1.1
rate = -6
por = 0.1
area = 0.5
vol = 0.25
p0 = 1
visc = 1.1
perm = 0.2
initial_dens = dens0 * np.exp(p0 / bulk)
return vol * por * initial_dens * np.exp(rate * area * perm * t / visc / vol / por)
def expected_s03(s):
rate = 6
area = 0.5
return rate * area * s * s
def expected_s04(p):
return [8 * min(max(pp + 0.2, 0.5), 1) for pp in p]
def expected_s05(p):
fcn = 6
center = 0.9
sd = 0.5
return [fcn * np.exp(-0.5 * pow(min((pp - center) / sd, 0), 2)) for pp in p]
def expected_s06(p):
fcn = 3
center = 0.9
cutoff = -0.8
xx = p - center
return [fcn if (x >= 0) else ( 0 if (x <= cutoff) else fcn / pow(cutoff, 3) * (2 * x + cutoff) * pow(x - cutoff, 2)) for x in xx]
def expected_s07(f):
rate = 6
area = 0.5
return rate * area * f
def expected_s08(pc):
mass_frac = 0.8
rate = 100
area = 0.5
al = 1.1
m = 0.5
sg = 1 - pow(1.0 + pow(al * pc, 1.0 / (1.0 - m)), -m)
return rate * area * mass_frac * sg * sg
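# --- Illustrative note (not part of the original source) ---
# expected_s08 encodes a van Genuchten-style relation: the effective liquid
# saturation is sl = (1 + (al*pc)**(1/(1-m)))**(-m), the gas saturation is
# sg = 1 - sl, and the expected sink rate scales as
# rate * area * mass_frac * sg**2 (a quadratic relative-permeability factor).
# This restates the code above rather than the MOOSE documentation.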
def s01():
f = open("../../../../../../test/tests/sinks/gold/s01.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
m00 = [d[2] for d in data]
t = [d[0] for d in data]
return (t, m00)
def s02():
f = open("../../../../../../test/tests/sinks/gold/s02.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
m00 = [d[1] for d in data]
t = [d[0] for d in data]
return (t, m00)
def s03():
f = open("../../../../../../test/tests/sinks/gold/s03.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
rate00 = [d[5] for d in data]
s = [d[10] for d in data]
return (s, rate00)
def s04():
f = open("../../../../../../test/tests/sinks/gold/s04.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
rate10 = [d[3] for d in data]
p = [d[9] for d in data]
return (p, rate10)
def s05():
f = open("../../../../../../test/tests/sinks/gold/s05.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
rate10 = [2*d[1] for d in data]
p = [d[10] for d in data]
return (p, rate10)
def s06():
f = open("../../../../../../test/tests/sinks/gold/s06.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
prate = sorted(list(set([(d[11], d[1]) for d in data] + [(d[12], d[2]) for d in data] + [(d[13], d[3]) for d in data] + [(d[14], d[4]) for d in data])), key = lambda x: x[0])
return list(zip(*prate))
def s07():
f = open("../../../../../../test/tests/sinks/gold/s07.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
massfrac = [d[1] for d in data]
flux = [d[4] for d in data]
return (massfrac, flux)
def s08():
f = open("../../../../../../test/tests/sinks/gold/s08.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
rate00 = [d[1] for d in data]
pc = [(d[6] - d[7]) for d in data]
return (pc, rate00)
plt.figure()
moose_results = s01()
mooset = moose_results[0]
moosem = moose_results[1]
delt = (mooset[-1] - mooset[0])/100
tpoints = np.arange(mooset[0] - delt, mooset[-1] + delt, delt)
plt.plot(tpoints, expected_s01(tpoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot(mooset, moosem, 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'upper right')
plt.xlabel("t (s)")
plt.ylabel("Nodal mass (kg)")
plt.title("Basic sink")
plt.savefig("s01.png")
plt.figure()
moose_results = s02()
mooset = moose_results[0]
moosem = moose_results[1]
delt = (mooset[-1] - mooset[0])/100
tpoints = np.arange(mooset[0] - delt, mooset[-1] + delt, delt)
plt.plot(tpoints, expected_s02(tpoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot(mooset, moosem, 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'upper right')
plt.xlabel("t (s)")
plt.ylabel("Nodal mass (kg)")
plt.title("Basic sink with mobility multiplier")
plt.savefig("s02.png")
plt.figure()
moose_results = s03()
mooses = moose_results[0]
mooser = moose_results[1]
dels = (mooses[0] - mooses[-1])/100
spoints = np.arange(mooses[-1] - dels, mooses[0] + dels, dels)
plt.plot(spoints, expected_s03(spoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot(mooses, mooser, 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'upper right')
plt.xlabel("Saturation")
plt.ylabel("Sink rate")
plt.title("Basic sink with relative-permeability multiplier")
plt.savefig("s03.png")
plt.figure()
moose_results = s04()
moosep = moose_results[0]
mooser = moose_results[1]
delp = (moosep[0] - moosep[-1])/100
ppoints = np.arange(moosep[-1] - delp, moosep[0] + delp, delp)
plt.plot(ppoints, expected_s04(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot(moosep, mooser, 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (Pa)")
plt.ylabel("Sink rate (kg/m^2/s)")
plt.title("Piecewise-linear sink")
plt.axis([0.1, 1, 3.9, 8.1])
plt.savefig("s04.png")
plt.figure()
moose_results = s05()
moosep = moose_results[0]
mooser = moose_results[1]
delp = (moosep[0] - moosep[-1])/100
ppoints = np.arange(moosep[-1] - delp, moosep[0] + delp, delp)
plt.plot(ppoints, expected_s05(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot(moosep, mooser, 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (Pa)")
plt.ylabel("Sink rate (kg/m^2/s)")
plt.title("Half-Gaussian sink")
plt.axis([-0.4, 1.2, 0, 6.1])
plt.savefig("s05.png")
plt.figure()
moose_results = s06()
moosep = moose_results[0]
mooser = moose_results[1]
delp = (moosep[-1] - moosep[0])/100
ppoints = np.arange(moosep[0] - delp, moosep[-1] + delp, delp)
plt.plot(ppoints, expected_s06(ppoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot(moosep, mooser, 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Porepressure (Pa)")
plt.ylabel("Sink rate (kg/m^2/s)")
plt.title("Half-cubic sink")
plt.axis([-0.1, 1.3, -0.1, 3.1])
plt.savefig("s06.png")
plt.figure()
moose_results = s07()
moosefrac = moose_results[0]
mooseflux = moose_results[1]
delfrac = (moosefrac[0] - moosefrac[-1])/100
fpoints = np.arange(moosefrac[-1] - delfrac, moosefrac[0] + delfrac, delfrac)
plt.plot(fpoints, expected_s07(fpoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot(moosefrac, mooseflux, 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Mass fraction")
plt.ylabel("Sink rate (kg/m^2/s)")
plt.title("Mass-fraction dependent sink")
plt.savefig("s07.png")
plt.figure()
moose_results = s08()
moosepc = moose_results[0]
mooser = moose_results[1]
delpc = (moosepc[0] - moosepc[-1])/100
pcpoints = np.arange(moosepc[-1] - delpc, moosepc[0] + delpc, delpc)
plt.plot(pcpoints, expected_s08(pcpoints), 'k-', linewidth = 3.0, label = 'expected')
plt.plot(moosepc, mooser, 'rs', markersize = 10.0, label = 'MOOSE')
plt.legend(loc = 'lower right')
plt.xlabel("Capillary pressure (Pa)")
plt.ylabel("Sink rate (kg/m^2/s)")
plt.title("Mass-fraction and relative-permeability dependent sink (2 phase, 3 comp)")
plt.savefig("s08.png")
def s09_01():
f = open("../../../../../../test/tests/sinks/gold/s09_mf_0010.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
return ([d[0] for d in data], [d[4] for d in data])
def s09_05():
f = open("../../../../../../test/tests/sinks/gold/s09_mf_0050.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
return ([d[0] for d in data], [d[4] for d in data])
def s09_10():
f = open("../../../../../../test/tests/sinks/gold/s09_mf_0100.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
return ([d[0] for d in data], [d[4] for d in data])
def s09_fully_saturated_01():
f = open("../../../../../../test/tests/sinks/gold/s09_fully_saturated_mf_0010.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
return ([d[0] for d in data], [d[4] for d in data])
def s09_fully_saturated_05():
f = open("../../../../../../test/tests/sinks/gold/s09_fully_saturated_mf_0050.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
return ([d[0] for d in data], [d[4] for d in data])
def s09_fully_saturated_10():
f = open("../../../../../../test/tests/sinks/gold/s09_fully_saturated_mf_0100.csv")
data = [list(map(float, line.strip().split(","))) for line in f.readlines()[1:] if line.strip()]
return ([d[0] for d in data], [d[4] for d in data])
plt.figure()
plt.plot(s09_01()[0], s09_01()[1], 'k-', label = 'upwinded, t=0.1')
plt.plot(s09_fully_saturated_01()[0], s09_fully_saturated_01()[1], 'r-', label = 'no upwinding, t=0.1')
plt.plot(s09_05()[0], s09_05()[1], 'k--', label = 'upwinded, t=0.5')
plt.plot(s09_fully_saturated_05()[0], s09_fully_saturated_05()[1], 'r--', label = 'no upwinding, t=0.5')
plt.plot(s09_10()[0], s09_10()[1], 'k.', label = 'upwinded, t=1.0')
plt.plot(s09_fully_saturated_10()[0], s09_fully_saturated_10()[1], 'r.', label = 'no upwinding, t=1.0')
plt.legend()
plt.xlabel("Mass fraction")
plt.ylabel("x (m)")
plt.title("Advected mass-fraction along line")
plt.savefig("s09.png")
sys.exit(0)
| lgpl-2.1 |
glenioborges/ibis | ibis/sql/sqlite/tests/test_client.py | 6 | 2772 | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
from .common import SQLiteTests
from ibis.compat import unittest
from ibis.tests.util import assert_equal
from ibis.util import guid
import ibis.expr.types as ir
import ibis.common as com
import ibis
class TestSQLiteClient(SQLiteTests, unittest.TestCase):
@classmethod
def tearDownClass(cls):
pass
def test_file_not_exist_and_create(self):
path = '__ibis_tmp_{0}.db'.format(guid())
with self.assertRaises(com.IbisError):
ibis.sqlite.connect(path)
ibis.sqlite.connect(path, create=True)
assert os.path.exists(path)
os.remove(path)
def test_table(self):
table = self.con.table('functional_alltypes')
assert isinstance(table, ir.TableExpr)
def test_array_execute(self):
d = self.alltypes.limit(10).double_col
s = d.execute()
assert isinstance(s, pd.Series)
assert len(s) == 10
def test_literal_execute(self):
expr = ibis.literal('1234')
result = self.con.execute(expr)
assert result == '1234'
def test_simple_aggregate_execute(self):
d = self.alltypes.double_col.sum()
v = d.execute()
assert isinstance(v, float)
def test_list_tables(self):
assert len(self.con.list_tables()) > 0
assert len(self.con.list_tables(like='functional')) == 1
def test_compile_verify(self):
unsupported_expr = self.alltypes.string_col.approx_nunique()
assert not unsupported_expr.verify()
supported_expr = self.alltypes.double_col.sum()
assert supported_expr.verify()
def test_attach_file(self):
pass
def test_database_layer(self):
db = self.con.database()
t = db.functional_alltypes
assert_equal(t, self.alltypes)
assert db.list_tables() == self.con.list_tables()
def test_compile_toplevel(self):
# t = ibis.table([
# ('foo', 'double')
# ])
# # it works!
# expr = t.foo.sum()
# ibis.sqlite.compile(expr)
# This does not work yet because if the compiler encounters a
# non-SQLAlchemy table it fails
pass
| apache-2.0 |
deepmind/deepmind-research | meshgraphnets/plot_cloth.py | 1 | 2159 | # Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Plots a cloth trajectory rollout."""
import pickle
from absl import app
from absl import flags
from matplotlib import animation
import matplotlib.pyplot as plt
FLAGS = flags.FLAGS
flags.DEFINE_string('rollout_path', None, 'Path to rollout pickle file')
def main(unused_argv):
with open(FLAGS.rollout_path, 'rb') as fp:
rollout_data = pickle.load(fp)
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection='3d')
skip = 10
num_steps = rollout_data[0]['gt_pos'].shape[0]
num_frames = len(rollout_data) * num_steps // skip
# compute bounds
bounds = []
for trajectory in rollout_data:
bb_min = trajectory['gt_pos'].min(axis=(0, 1))
bb_max = trajectory['gt_pos'].max(axis=(0, 1))
bounds.append((bb_min, bb_max))
def animate(num):
step = (num*skip) % num_steps
traj = (num*skip) // num_steps
ax.cla()
bound = bounds[traj]
ax.set_xlim([bound[0][0], bound[1][0]])
ax.set_ylim([bound[0][1], bound[1][1]])
ax.set_zlim([bound[0][2], bound[1][2]])
pos = rollout_data[traj]['pred_pos'][step]
faces = rollout_data[traj]['faces'][step]
ax.plot_trisurf(pos[:, 0], pos[:, 1], faces, pos[:, 2], shade=True)
ax.set_title('Trajectory %d Step %d' % (traj, step))
return fig,
_ = animation.FuncAnimation(fig, animate, frames=num_frames, interval=100)
plt.show(block=True)
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
MaxHalford/Prince | tests/test_pca.py | 1 | 3331 | import unittest
import matplotlib as mpl
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn import decomposition
from sklearn.utils import estimator_checks
import prince
class TestPCA(unittest.TestCase):
def setUp(self):
X, _ = datasets.load_iris(return_X_y=True)
columns = ['Sepal length', 'Sepal width', 'Petal length', 'Sepal length']
self.X = pd.DataFrame(X, columns=columns)
def test_fit_pandas_dataframe(self):
pca = prince.PCA(n_components=2, engine='fbpca')
self.assertTrue(isinstance(pca.fit(self.X), prince.PCA))
def test_transform_pandas_dataframe(self):
pca = prince.PCA(n_components=2)
self.assertTrue(isinstance(pca.fit(self.X).transform(self.X), pd.DataFrame))
def test_fit_numpy_array(self):
pca = prince.PCA(n_components=2, engine='fbpca')
self.assertTrue(isinstance(pca.fit(self.X.values), prince.PCA))
def test_transform_numpy_array(self):
pca = prince.PCA(n_components=2)
self.assertTrue(isinstance(pca.fit(self.X.values).transform(self.X.values), pd.DataFrame))
def test_copy(self):
XX = np.copy(self.X)
pca = prince.PCA(n_components=2, copy=True)
pca.fit(XX)
np.testing.assert_array_equal(self.X, XX)
pca = prince.PCA(n_components=2, copy=False)
pca.fit(XX)
self.assertRaises(AssertionError, np.testing.assert_array_equal, self.X, XX)
def test_fit_transform(self):
# Without rescaling
prince_pca = prince.PCA(n_components=3, rescale_with_mean=False, rescale_with_std=False)
pd.testing.assert_frame_equal(
prince_pca.fit_transform(self.X),
prince_pca.fit(self.X).transform(self.X)
)
# With rescaling
prince_pca = prince.PCA(n_components=3, rescale_with_mean=True, rescale_with_std=True)
pd.testing.assert_frame_equal(
prince_pca.fit_transform(self.X),
prince_pca.fit(self.X).transform(self.X)
)
def test_compare_sklearn(self):
n_components = 4
pca_prince = prince.PCA(n_components=n_components, rescale_with_std=False)
pca_sklearn = decomposition.PCA(n_components=n_components)
pca_prince.fit(self.X)
pca_sklearn.fit(self.X)
# Compare eigenvalues
np.testing.assert_array_almost_equal(
pca_prince.eigenvalues_,
np.square(pca_sklearn.singular_values_),
)
# Compare row projections
np.testing.assert_array_almost_equal(
pca_prince.transform(self.X),
pca_sklearn.transform(self.X)
)
# Compare explained inertia
np.testing.assert_array_almost_equal(
pca_prince.explained_inertia_,
pca_sklearn.explained_variance_ratio_
)
def test_explained_inertia_(self):
pca = prince.PCA(n_components=4)
pca.fit(self.X)
self.assertTrue(np.isclose(sum(pca.explained_inertia_), 1))
def test_plot_row_coordinates(self):
pca = prince.PCA(n_components=4)
pca.fit(self.X)
ax = pca.plot_row_coordinates(self.X)
self.assertTrue(isinstance(ax, mpl.axes.Axes))
def test_check_estimator(self):
estimator_checks.check_estimator(prince.PCA)
| mit |
userdw/RaspberryPi_3_Starter_Kit | 08_Image_Processing/Color_Spaces/hls/hls.py | 1 | 1179 | import os, cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
_projectDirectory = os.path.dirname(__file__)
_imagesDirectory = os.path.join(_projectDirectory, "images")
_images = []
for _root, _dirs, _files in os.walk(_imagesDirectory):
for _file in _files:
if _file.endswith(".jpg"):
_images.append(os.path.join(_imagesDirectory, _file))
_imageIndex = 0
_imageTotal = len(_images)
_img1 = cv2.imread(_images[_imageIndex], cv2.IMREAD_UNCHANGED)
_fig = plt.figure("Color Spaces")
_gs = GridSpec(3, 3)
_fig1 = plt.subplot(_gs[0:3, 0:2])
_fig1.set_title("RGB Space")
_img1Show = cv2.cvtColor(_img1, cv2.COLOR_BGR2RGB) #for display purposes
plt.imshow(_img1Show)
_img1 = cv2.cvtColor(_img1, cv2.COLOR_BGR2HLS)
_img2, _img3, _img4 = cv2.split(_img1)
_fig2 = plt.subplot(_gs[0, 2])
_fig2.set_title("Hue Channel")
plt.imshow(_img2, cmap = "gray")
_fig3 = plt.subplot(_gs[1, 2])
_fig3.set_title("Lightness Channel")
plt.imshow(_img3, cmap = "gray")
_fig4 = plt.subplot(_gs[2, 2])
_fig4.set_title("Saturation Channel")
plt.imshow(_img4, cmap = "gray")
plt.tight_layout()
plt.show()
| mit |
jjhelmus/artview | docs/sphinxext/numpydoc/tests/test_docscrape.py | 3 | 17864 | # -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N,N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert_equal(len(doc['Other Parameters']), 1)
assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
arg, arg_type, desc = doc['Other Parameters'][0]
assert_equal(arg_type, 'parrot')
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
assert_equal(len(doc['Returns']), 1)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a,b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l for l in a.split('\n') if l.strip()]
b = [l for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError :
Some error
Warns
-----
RuntimeWarning :
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N,N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError** :
Some error
:Warns:
**RuntimeWarning** :
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name,_,desc = doc5['Raises'][0]
assert_equal(name,'LinAlgException')
assert_equal(desc,['If array is singular.'])
def test_warns():
assert_equal(len(doc5['Warns']), 1)
name,_,desc = doc5['Warns'][0]
assert_equal(name,'SomeWarning')
assert_equal(desc,['If needed'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
if sys.version_info[0] >= 3:
assert doc['Summary'][0] == u'öäöäöäöäöåååå'
else:
assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
@property
def spammity(self):
"""Spammity index"""
return 0.95
class Ignorable(object):
"""local class, to be ignored"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
assert 'Spammity index' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed comes from the
# docstring itself.
doc = NumpyDocString(
"""
z(x1, x2)
z(a, theta)
""")
assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Examples
--------
For usage examples, see `ode`.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a :
b :
c :
.. index::
""")
def test_class_members_doc_sphinx():
doc = SphinxClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
:Parameters:
**f** : callable ``f(t, y, *f_args)``
Aaa.
**jac** : callable ``jac(t, y, *jac_args)``
Bbb.
.. rubric:: Examples
For usage examples, see `ode`.
.. rubric:: Attributes
=== ==========
t (float) Current time.
y (ndarray) Current variable values.
=== ==========
.. rubric:: Methods
=== ==========
a
b
c
=== ==========
""")
if __name__ == "__main__":
import nose
nose.run()
| bsd-3-clause |
anhaidgroup/py_entitymatching | py_entitymatching/matcher/svmmatcher.py | 1 | 1145 | """
This module contains the functions for SVM classifier.
"""
from py_entitymatching.matcher.mlmatcher import MLMatcher
from py_entitymatching.matcher.matcherutils import get_ts
from sklearn.svm import SVC
class SVMMatcher(MLMatcher):
"""
SVM matcher.
Args:
*args,**kwargs: The arguments to scikit-learn's SVM
classifier.
name (string): The name of this matcher (defaults to None). If the
matcher name is None, the class automatically generates a string
and assigns it as the name.
"""
def __init__(self, *args, **kwargs):
super(SVMMatcher, self).__init__()
# If the name is given, then pop it
name = kwargs.pop('name', None)
if name is None:
# If the name of the matcher is give, then create one.
# Currently, we use a constant string + a random number.
self.name = 'SVM'+ '_' + get_ts()
else:
# Set the name of the matcher, with the given name.
self.name = name
# Set the classifier to the scikit-learn classifier.
self.clf = SVC(*args, **kwargs) | bsd-3-clause |
phdowling/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# a different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
MichaelAquilina/numpy | numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists, and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
For example: ::
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
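For example: ::
>>> np.ones((2, 3))
array([[ 1., 1., 1.], [ 1., 1., 1.]])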
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
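As an illustrative sketch only (this assumes PyTables 3.x and a file named
example.h5 containing an array node /data; both names are hypothetical): ::
>>> import tables
>>> f = tables.open_file('example.h5', 'r')
>>> arr = f.root.data.read() # node contents returned as a numpy array
>>> f.close()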
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
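A minimal sketch using numpy's own readers (the filenames are only
illustrative): ::
>>> arr = np.genfromtxt('data.csv', delimiter=',')
>>> arr2 = np.loadtxt('data.txt') # whitespace-separated numeric columns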
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
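A minimal sketch of that round trip (filename and dtype are only illustrative;
fromfile stores no dtype or shape metadata, so they must be supplied again on
read): ::
>>> a = np.arange(10, dtype=np.float64)
>>> a.tofile('data.bin')
>>> b = np.fromfile('data.bin', dtype=np.float64)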
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
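For example: ::
>>> np.diag([1, 2, 3])
array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
>>> np.random.random((2, 2)) # values in [0, 1); output varies from run to run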
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
Unpluralized/PyAE | pridb_filters.py | 1 | 19158 | # coding: utf-8
import pandas as pd
from numpy import gradient as np_gradient
import ConfigParser
from numba import jit
import time
import pickle
config = ConfigParser.ConfigParser()
config.read('./pridb_filter_config.ini')
def apply_filters(df, name, **kwargs):
"""
:param df: Pandas dataframe with AE hits & features
:param name: lookup name (test id) in pridb_filter_config.ini for filter parameters
:param kwargs: ignore_time_amount: default False. If True, ignores TimeMaxAmount in filter file
load_pickle: default False. If True, load previously filtered file from pickle.
If False, simply load virgin dataset and apply filters.
write_pickle: Write filtered dataframe to pickle in ./pridb_filters_pickles;
filename = name.pickle
:return: Dataframe with all filters applied to it.
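Example (illustrative only; the dataframe df_hits and the section name 'S01'
are hypothetical and must match an entry in pridb_filter_config.ini):
>>> df_filtered = apply_filters(df_hits, 'S01', write_pickle=True)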
"""
reframe_time = config_getter('General', 'ReframeTime')
counts_threshold = config_getter('General', 'ReflectionsCountsThreshold')
load_pickle = kwargs.get('load_pickle', False)
write_pickle = kwargs.get('write_pickle', False)
# disable annoying warnings about Pandas Chained Assignment. There is no danger here.
pd.options.mode.chained_assignment = None
# for name in names:
if load_pickle:
try:
df = pickle.load(open('./pridb_filters_pickles/%s.pickle' % name, 'rb'))
except IOError:
raise IOError('pridb_filters: load_pickle = True but no pickled dataset found.')
else:
print('pridb_filters.apply_filters: read %s hits in %s minutes from pickled file.'
% (str(len(df[df['TRAI'] > 0].index)), str(round(df['Time'].max() - df['Time'].min(), 2))))
return df
else:
t_lower = config_getter(name, 'TimeLowerBound')
load_upper = config_getter(name, 'LoadUpperBound')
loading_rate = config_getter(name, 'TestStartLoadingRateCriterion')
load_smoothing = config_getter(name, 'LoadSmoothing')
"""
Order of operations is important! Remove_reflections needs intact sequence of TRAI values
"""
df = time_filter(df, from_config=name, ignore_amount=kwargs.get('ignore_time_amount', False))
if kwargs.get('label_cycle', False):
df = label_load(df, name)
# df = remove_reflections(df, counts_threshold=counts_threshold)
if 'PA0' in df.columns:
# Convert the analog reading to kN
load_conversion_factor = config_getter(name, 'AnalogReadInTokN', 0.01)
df['PA0'] = df['PA0'].apply(lambda p: p*load_conversion_factor)
if 'PA1' in df.columns:
displ_conversion_factor = config_getter(name, 'AnalogReadInTomm', 0.01)
df['PA1'] = df['PA1'].apply(lambda p: p*displ_conversion_factor)
if name.startswith('S'):
# Filter out everything past maximum load for static tests
df = static_maxload_cutoff(df, load=load_upper)
df = min_positive_loading_rate(df, t_lower=t_lower, loading_rate=loading_rate)
if name.startswith('D'):
df = split_min_max_load(df)
if load_smoothing:
df['SetID'] = df.index
df.set_index('Time', inplace=True, drop=False)
df['PA0'].interpolate(method='nearest', limit_direction='both', inplace=True)
df['PA0'] = df['PA0'].rolling(window=int(load_smoothing), center=True, min_periods=10).mean()
df.set_index('SetID', inplace=True)
df = energy_filter(df, from_config=name)
df = amplitude_filter(df, from_config=name)
df = duration_filter(df, from_config=True)
df = hit_num_filter(df, from_config=name)
# Convert time column to minutes
df['Time'] = df['Time'] / 60.
pd.options.mode.chained_assignment = 'warn'
print('pridb_filters.apply_filters: left with %s hits in %s minutes after filtering.'
% (str(len(df[df['TRAI'] > 0].index)), str(round(df['Time'].max()-df['Time'].min(), 2))))
# print df[df['TRAI'] > 0].head()
if 'PA0' in df.columns or 'PA1' in df.columns:
if write_pickle:
with open('./pridb_filters_pickles/%s.pickle' % name, 'wb') as f:
pickle.dump(df.dropna(subset=['TRAI', 'PA0'], how='all'), f)
return df.dropna(subset=['TRAI', 'PA0'], how='all')
else:
if write_pickle:
with open('./pridb_filters_pickles/%s.pickle' % name, 'wb') as f:
pickle.dump(df[df['TRAI'] > 0], f)
return df[df['TRAI'] > 0]
def config_getter(section, option, *default):
try:
value = config.get(section, option).strip()
try:
return eval(value)
except NameError:
return value
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
if default:
return default[0]
else:
return None
def static_maxload_cutoff(df, **kwargs):
"""
Function to automatically discard data after maximum load has been reached.
(meant for static tests)
:param df: Pandas dataframe containing at least a 'Time' and 'PA0' (= Load) column
:param kwargs: 'load': set maximum load cutoff point manually.
:return: Filtered dataframe containing only entries that
happen before 'PA0' reaches its maximum value
"""
load_data = df[['Time', 'PA0']].dropna()
max_load = kwargs.get('load')
if max_load is None:
max_load = load_data['PA0'].max()
if len(load_data[load_data['PA0'] >= max_load].index) > 0:
max_load_index = load_data[load_data['PA0'] >= max_load].index[-1]
print('pridb_filters: filtered all data after max. load of %s kN is reached.' % str(max_load))
df = df[df['Time'] < df.loc[max_load_index]['Time']].dropna(subset=['Time'])
return df
def min_positive_loading_rate(df, **kwargs):
"""
Function to automatically discard data that happens before a defined loading rate (dLoad).
Meant for windowing of static tests.
:param df: Pandas dataframe containing at least a 'Time' and 'PA0' (= Load) column
:param kwargs: 'loading_rate': set minimum (default 1 / second)
't_lowerbound': cut off data preemptively
:return: New dataframe containing only entries that happen
after the minimum loading rate has occurred.
"""
# df = df[df['Time'] > kwargs.get('t_lower', 0)]
load_data = df[['Time', 'PA0']].dropna()
loading_rate = kwargs.get('loading_rate')
if loading_rate is None:
loading_rate = 0.012
# Resample the load data
load_data['Time'] = pd.to_timedelta(load_data['Time']*1E9)
load_data = load_data.set_index('Time').resample('1S').mean()
# Simple difference method to compute approximation of derivative
load_data['dPA0'] = load_data['PA0'].diff() # column now contains load change/second
time_lower = load_data.loc[load_data['dPA0'] > loading_rate].index[0].total_seconds()
load_lower = load_data.loc[load_data['dPA0'] > loading_rate]['PA0'][0]
df = df[df['Time'] > time_lower].dropna(how='all', axis=1)
print('pridb_filters: filtered all data before load reached %s kN (rate criterion: %s).'
% (str(load_lower), str(loading_rate)))
# import matplotlib.pyplot as plt
# plt.plot(load_data['dPA0'])
# plt.show()
return time_filter(df, reframe_time=True)
def split_min_max_load(df, **kwargs):
"""
For fatigue tests: split load data into rolling max & rolling min, so they
can be legibly plotted alongside AE parameters as a function of time
"""
# df['load_min'] = pd.rolling_min(df['PA0'], 1000, min_periods=200, center=True)
# df['load_max'] = pd.rolling_max(df['PA0'], 1000, min_periods=200, center=True)
# pd.rolling_min/_max is deprecated
df['load_min'] = df['PA0'].rolling(min_periods=200, window=1000, center=True).min()
df['load_max'] = df['PA0'].rolling(min_periods=200, window=1000, center=True).max()
df['displ_min'] = df['PA1'].rolling(min_periods=200, window=1000, center=True).min().\
rolling(min_periods=200, window=3000, center=True).median()
df['displ_max'] = df['PA1'].rolling(min_periods=200, window=1000, center=True).max().\
rolling(min_periods=200, window=3000, center=True).median()
return df
def duration_filter(df, **kwargs):
"""
:param df: Pandas dataframe containing 'Dur' column
:param kwargs: int lowerbound and/or int upperbound
if test_id: lookup filter parameters in pridb_filter_config.ini
:return: New dataframe containing only entries with
duration between lowerbound & upperbound params
"""
test_id = kwargs.get('from_config', None)
d_lower = kwargs.get('lowerbound', None)
d_upper = kwargs.get('upperbound', None)
if test_id:
d_lower = config_getter('General', 'DurationLowerBound')
d_upper = config_getter('General', 'DurationUpperBound')
oldlen = len(df[df['TRAI'] > 0].index)
if d_upper:
df.drop(df[df['Dur'] > d_upper].index, inplace=True)
if d_lower:
df.drop(df[df['Dur'] < d_lower].index, inplace=True)
newlen = len(df[df['TRAI'] > 0].index)
if oldlen != newlen:
percent_deleted = round(100 * (float(oldlen - newlen)) / oldlen, 2)
print('pridb_filters: filtered %s hits (%s %%) outside duration bounds <%s, %s>.'
% (str(oldlen-newlen), str(percent_deleted), str(d_lower), str(d_upper)))
return df
def hit_num_filter(df, **kwargs):
"""
:param df: Pandas dataframe containing AE hits
Filter a defined amount of hits from a dataframe. Note this method does not use
TRAI or SetID values, as this method is called after filtering.
h_lower: lowerbound of hit numbers to include
h_upper: upperbound of hit numbers (python bound, so exclusive)
h_amount: number of hits to keep.
If h_amount < 1: assume it represents a fraction of total hits
if test_id: lookup filter parameters in pridb_filter_config.ini
if h_amount: ignore h_upper
if not h_lower: assume h_lower=0
"""
test_id = kwargs.get('from_config', None)
h_lower = kwargs.get('lowerbound', None)
h_upper = kwargs.get('upperbound', None)
h_amount = kwargs.get('amount', None)
if test_id:
h_lower = config_getter(test_id, 'HitCountLowerBound', 0)
h_upper = config_getter(test_id, 'HitCountUpperBound')
if not h_amount and not h_upper: # Neither upper nor amount: not enough information
return df
# Separate data: we will only want to discard hit rows, not load rows
load_data = df[df['TRAI'].isnull()].dropna()
df = df[df['TRAI'] > 0].dropna(subset=['Time'])
print(df.head(10))  # debug: preview of hit rows before slicing
if h_amount:
if h_amount < 1:
h_amount *= len(df.index)
df = df.iloc[h_lower:(h_lower+h_amount)]
elif h_upper:
df = df.iloc[h_lower:h_upper]
# Rejoin data
df = df.append(load_data)
newlen = len(df[df['TRAI'] > 0].index)
print('pridb_filters: returned %s hits between hit number bounds <%s, %s>.'
% (str(newlen), str(h_lower), str(h_upper)))
return df
def time_filter(df, **kwargs):
"""
:param df: Pandas dataframe containing 'Time' column
:param kwargs: int lowerbound and/or int upperbound [seconds]
boolean reframe_time
if test_id: lookup filter parameters in pridb_filter_config.ini
:return: New dataframe containing only entries with
time between lowerbound & upperbound params.
If reframe_time set to True, also shift all
timecodes by -min(df['Time']).
"""
test_id = kwargs.get('from_config', None)
t_lower = kwargs.get('lowerbound', None)
t_upper = kwargs.get('upperbound', None)
t_amount = kwargs.get('max_amount', None)
reframe_time = kwargs.get('reframe_time', False)
if test_id:
reframe_time = config_getter('General', 'ReframeTime')
t_lower = config_getter(test_id, 'TimeLowerBound')
t_upper = config_getter(test_id, 'TimeUpperBound')
t_amount = config_getter(test_id, 'TimeMaxAmount')
oldlen = len(df[df['TRAI'] > 0].index)
if t_upper:
df['Time'] = df[df['Time'] < t_upper]['Time']
df.dropna(subset=['Time'], inplace=True)
if t_lower:
df['Time'] = df[df['Time'] > t_lower]['Time']
df.dropna(subset=['Time'], inplace=True)
newlen = len(df[df['TRAI'] > 0].index)
if t_upper or t_lower:
print('pridb_filters: filtered %s hits outside time bounds <%s, %s>.'
% (str(oldlen-newlen), str(t_lower), str(t_upper)))
if reframe_time:
offset = df['Time'].min()
df['Time'] = df['Time'].subtract(offset, axis=0)
print('pridb_filters: offset time values by %s seconds.' % str(-offset))
if t_amount and not kwargs.get('ignore_amount', False):
t_amount_corr = t_amount - df['Time'].min()
df['Time'] = df[df['Time'] < t_amount_corr]['Time']
df.dropna(subset=['Time'], inplace=True)
print('pridb_filters: filtered %s hits outside the first %s minutes' %
(str(newlen - len(df[df['TRAI'] > 0].index)), str(t_amount/60)))
return df
def energy_filter(df, **kwargs):
"""
:param df: Pandas dataframe containing 'Eny' column
:param kwargs: int lowerbound and/or int upperbound
if test_id: lookup filter parameters in pridb_filter_config.ini
:return: New dataframe containing only entries with
energy between lowerbound & upperbound params
"""
test_id = kwargs.get('from_config', None)
e_lower = kwargs.get('lowerbound', None)
e_upper = kwargs.get('upperbound', None)
if test_id:
e_lower = config_getter(test_id, 'EnergyLowerBound')
e_upper = config_getter(test_id, 'EnergyUpperBound')
oldlen = len(df[df['TRAI'] > 0].index)
if e_upper:
df.drop(df[df['Eny'] > e_upper].index, inplace=True)
if e_lower:
df.drop(df[df['Eny'] < e_lower].index, inplace=True)
newlen = len(df[df['TRAI'] > 0].index)
if oldlen != newlen:
print('pridb_filters: filtered %s hits outside energy bounds <%s, %s>.'
% (str(oldlen-newlen), str(e_lower), str(e_upper)))
return df
def amplitude_filter(df, **kwargs):
"""
:param df: Pandas dataframe containing 'Amp' column
:param kwargs: int lowerbound and/or int upperbound
if test_id: lookup filter parameters in pridb_filter_config.ini
:return: New dataframe containing only entries with
amplitude between lowerbound & upperbound params
"""
test_id = kwargs.get('from_config', None)
a_lower = kwargs.get('lowerbound', None)
a_upper = kwargs.get('upperbound', None)
if test_id:
a_lower = config_getter(test_id, 'AmplitudeLowerBound')
a_upper = config_getter(test_id, 'AmplitudeUpperBound')
oldlen = len(df[df['TRAI'] > 0].index)
if a_upper:
df.drop(df[df['Amp'] > a_upper].index, inplace=True)
if a_lower:
df.drop(df[df['Amp'] < a_lower].index, inplace=True)
newlen = len(df[df['TRAI'] > 0].index)
if oldlen != newlen:
print('pridb_filters: filtered %s hits outside amplitude bounds <%s, %s>.'
% (str(oldlen-newlen), str(a_lower), str(a_upper)))
return df
def remove_reflections(df, **kwargs):
"""
Important note: In the current implementation, remove_reflections needs a sequentially INTACT TRAI index.
It is therefore best to remove reflections before applying any other filters.
:param df: Pandas dataframe containing 'Counts' column
:param kwargs: int counts_threshold - number of counts above which to expect reflections.
:return: Dataframe that has all the 1-count rows removed that follow a row that has
>= counts_threshold counts.
"""
counts_threshold = kwargs.get('counts_threshold', 3)
oldlen = len(df[df['TRAI'] > 0].index)
# df['DeltaTime'] = df['Time'].diff()
# row i has Time(i) - Time(i-1). Therefore,
# DeltaTime says for each hit how long it was since the hit before.
# Find all entries above the counts threshold.
# Keep (TRAI) indices of entries that follow aforementioned ones:
df_delete_candidates = df[df['Counts'] >= counts_threshold]['TRAI'].apply(lambda x: x+1)
# Check for index out of bounds danger
if df['TRAI'].max() + 1 == max(df_delete_candidates):
df_delete_candidates = \
df_delete_candidates[df_delete_candidates != max(df_delete_candidates)]
# Following statement successfully selects ONLY the to be discarded entries
# WANT: not (candidate and C=1). BUT: not(A and B) = not(A) or not(B)
df = df[~df['TRAI'].isin(df_delete_candidates) | (df['Counts'] != 1)]
newlen = len(df[df['TRAI'] > 0].index)
percent_deleted = round(100*(float(oldlen-newlen))/oldlen, 2)
print('pridb_filters: filtered %s hits (%s %%) as reflections.'
% (str(int(oldlen-newlen)), str(percent_deleted)))
return df
def label_load(df, name=None):  # 'name' accepted for compatibility with apply_filters(label_cycle=True)
"""
Label hits with the 'up'/'top'/'down'/'bottom' fatigue cycle phase labels
:param df: Pandas dataframe containing at least a 'PA0' column
:return: df with properly filled column 'cycle_label'
"""
old_index_name = df.index.name
df[old_index_name] = df.index
df.set_index('Time', drop=False, inplace=True)
t = time.time()
df['PA0'].interpolate(method='index', limit_direction='both', inplace=True)
df['dPA0'] = np_gradient(df['PA0'])
if 'PA1' in df.columns:
df['PA1'].interpolate(method='index', limit_direction='both', inplace=True)
df['dPA1'] = np_gradient(df['PA1'])
load_max = df['PA0'].rolling(window=100, center=True).max().median()
load_min = df['PA0'].rolling(window=100, center=True).min().median()
load_amplitude = (load_max-load_min)/2.
load_middle = load_min + load_amplitude
load_decide = load_amplitude*.5*2**.5 # 1/2 sqrt(2) of the maximum amplitude
df['cycle_label'] = df.apply(_label_load_helper, args=(load_middle, load_decide), axis=1)
df.set_index(old_index_name, inplace=True)
print('pridb_filters: adding cycle labels took %s s' % str(time.time()-t))
return df
@jit
def _label_load_helper(row, load_middle, load_decide):
if row['PA0'] > load_middle + load_decide:
return 'top'
elif row['PA0'] < load_middle - load_decide:
return 'bottom'
elif row['dPA0'] > 0:
return 'up'
elif row['dPA0'] < 0:
return 'down'
| gpl-3.0 |
RobertABT/heightmap | build/matplotlib/examples/axes_grid/simple_anchored_artists.py | 16 | 1950 | import matplotlib.pyplot as plt
def draw_text(ax):
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredText
at = AnchoredText("Figure 1a",
loc=2, prop=dict(size=8), frameon=True,
)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at)
at2 = AnchoredText("Figure 1(b)",
loc=3, prop=dict(size=8), frameon=True,
bbox_to_anchor=(0., 1.),
bbox_transform=ax.transAxes
)
at2.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at2)
def draw_circle(ax): # circle in the canvas coordinate
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredDrawingArea
from matplotlib.patches import Circle
ada = AnchoredDrawingArea(20, 20, 0, 0,
loc=1, pad=0., frameon=False)
p = Circle((10, 10), 10)
ada.da.add_artist(p)
ax.add_artist(ada)
def draw_ellipse(ax):
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredEllipse
# draw an ellipse of width=0.1, height=0.15 in the data coordinate
ae = AnchoredEllipse(ax.transData, width=0.1, height=0.15, angle=0.,
loc=3, pad=0.5, borderpad=0.4, frameon=True)
ax.add_artist(ae)
def draw_sizebar(ax):
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
# draw a horizontal bar with length of 0.1 in Data coordinate
# (ax.transData) with a label underneath.
asb = AnchoredSizeBar(ax.transData,
0.1,
r"1$^{\prime}$",
loc=8,
pad=0.1, borderpad=0.5, sep=5,
frameon=False)
ax.add_artist(asb)
if 1:
ax = plt.gca()
ax.set_aspect(1.)
draw_text(ax)
draw_circle(ax)
draw_ellipse(ax)
draw_sizebar(ax)
plt.show()
| mit |
nicholasmalaya/paleologos | exp/press_trans/code/read_incline_error.py | 2 | 2212 | #!/bin/py
#
# open file
# read contents
# (re)start when third column found
#
import sys
#
# open and read file
#
path="../data/statistics_incl.lvm"
file = open(path, "r+")
#
# data objects
#
set_names = []
voltage = []
std = []
height = []
voltage2 = []
std2 = []
height2 = []
for line in file:
#
# sep by whitespace
#
line_list = line.split()
set_name=line_list[5:]
set_names.append(' '.join(set_name))
if( len(set_names) > 9):
height2.append(line_list[8])
voltage2.append(line_list[2])
std2.append(line_list[3])
else:
height.append(line_list[8])
voltage.append(line_list[2])
std.append(line_list[3])
#
# clean up
#
file.close()
#
# least squares curve fit
#
import numpy as np
from scipy import stats
height = [float(i) for i in height]
voltage = [float(i) for i in voltage]
std = [float(i) for i in std]
(slope, intercept, r_value, p_value, std_err) = stats.linregress(height,voltage)
print "r-squared:", r_value**2
print 'p_value', p_value
print 'slope: ', slope
print 'std error: ', std_err
print '\sigma x^2: ',sum(height)
#
# calculate uncertainty
#
h = np.array(height)
s = np.array(std)
print len(s)
buff = h*h
uq = 100*2*s*np.sqrt(std_err)*(1.0/len(s) + buff/sum(height))
print uq
#
# plot it!
#
import matplotlib.pyplot as plt
plt.subplot(1, 1, 1)
line = slope*np.array(height) + intercept
plt.errorbar(height, line,color='blue',yerr=uq,label='Least Squares Fit w/ Uncertainty')
plt.title('Calibration of an Pressure Transducer: Micromanometer')
plt.ylabel('Voltage')
plt.xlabel('Height (Inches)')
plt.legend(loc='best')
plt.savefig('incl-error.png')
plt.close()
#
# steady as she goes
#
sys.exit(0)
#
# nick
# 9/9/15
#
# old header:
#
# X_Dimension Time Time Time Time
# X0 0.0000000000000000E+0 0.0000000000000000E+0 0.0000000000000000E+0 0.0000000000000000E+0
# Delta_X 0.001000 0.001000 0.001000 0.001000
# ***End_of_Header***
# X_Value Pressure Transducer Voltage (Arith. Mean) Pressure Transducer Voltage (Std Dev) Pressure Transducer Voltage (Variance) Pressure Transduce\
# r Voltage (Total Samples) Comment
| mit |
lssfau/walberla | apps/benchmarks/UniformGrid/ecmModel.py | 1 | 2190 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
kernels = dict()
class Kernel:
def __init__(self, name, cyclesFirstLoop=0, cyclesSecondLoop=0, cyclesRegPerLUP=0):
self.name = name
if cyclesRegPerLUP <= 0:
self.cyclesFirstLoop = cyclesFirstLoop
self.cyclesSecondLoop = cyclesSecondLoop
self.cyclesRegPerLUP = cyclesFirstLoop + 9 * cyclesSecondLoop
else:
self.cyclesRegPerLUP = cyclesRegPerLUP
self.cyclesRegPerCacheLine = 8 * self.cyclesRegPerLUP
self.cyclesL1L2 = 3 * 19 * 2
self.cyclesL2L3 = 3 * 19 * 2
self.freq = 2.7e9
self.cyclesMem = 305
# self.cyclesMem = 191
def mlups(self, processes):
singleCoreCycles = self.cyclesRegPerCacheLine + self.cyclesL1L2 + self.cyclesL2L3 + self.cyclesMem
timeSingleCore = singleCoreCycles / self.freq
mlups = 8 / timeSingleCore * 1e-6
# todo
mlupsMax = 78
return min(processes * mlups, mlupsMax)
def plot(self, divideByProcesses=False, processes=8, label=""):
x = np.arange(1, processes + 1, 1)
if divideByProcesses:
y = np.array([self.mlups(i) / i for i in x])
else:
y = np.array([self.mlups(i) for i in x])
if label == "":
label = "ecm_" + self.name
plt.plot(x, y, marker='^', markersize=5, label=label)
kernels = dict()
# kernels['srt_split'] = Kernel("srt_split", 46, 12 )
kernels['srt_pure'] = Kernel("srt_pure", 40, 8)
kernels['trt_split'] = Kernel("trt_split", 41, 11)
# SRTStreamCollide.h - pgo and lto (20cycles first loop, 35 second)
kernels['srt_nonopt'] = Kernel("srt_nonopt",
cyclesRegPerLUP=1045)
# kernels['trt_pure_intelOpt'] = Kernel("trt_pure_intelOpt", 41/2, 10/2 ) # vectorized (v*pd)
def plotAllKernels(divideByProcesses=False):
for kernel in kernels.values():
kernel.plot(divideByProcesses)
def plot(kernelName, divideByProcesses=False, label=""):
kernels[kernelName].plot(divideByProcesses, label=label)
if __name__ == "__main__":
plotAllKernels()
plt.legend()
plt.show()
| gpl-3.0 |
vlouf/cpol_processing | cpol_processing/filtering.py | 1 | 9055 | """
Codes for creating and manipulating gate filters. New functions: use of trained
Gaussian Mixture Models to remove noise and clutter from CPOL data before 2009.
@title: filtering.py
@author: Valentin Louf <valentin.louf@bom.gov.au>
@institutions: Monash University and the Australian Bureau of Meteorology
@created: 20/11/2017
@date: 25/02/2021
.. autosummary::
:toctree: generated/
texture
get_clustering
get_gatefilter_GMM
do_gatefilter_cpol
do_gatefilter
"""
# Libraries
import os
import gzip
import pickle
import pyart
import cftime
import numpy as np
import pandas as pd
def texture(data: np.ndarray) -> np.ndarray:
"""
Compute the texture of data.
Compute the texture of the data by comparing values with a 3x3 neighborhood
(based on :cite:`Gourley2007`). NaN values in the original array have
NaN textures. (Wradlib function)
Parameters:
==========
data : :class:`numpy:numpy.ndarray`
multi-dimensional array with shape (..., number of beams, number
of range bins)
Returns:
=======
texture : :class:`numpy:numpy.ndarray`
array of textures with the same shape as data
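Example (illustrative only; any 2-D array of gate values will do):
>>> arr = np.random.rand(360, 480)
>>> tex = texture(arr) # same shape as arr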
"""
x1 = np.roll(data, 1, -2) # center:2
x2 = np.roll(data, 1, -1) # 4
x3 = np.roll(data, -1, -2) # 8
x4 = np.roll(data, -1, -1) # 6
x5 = np.roll(x1, 1, -1) # 1
x6 = np.roll(x4, 1, -2) # 3
x7 = np.roll(x3, -1, -1) # 9
x8 = np.roll(x2, -1, -2) # 7
# at least one NaN would give a sum of NaN
xa = np.array([x1, x2, x3, x4, x5, x6, x7, x8])
# get count of valid neighboring pixels
xa_valid = np.ones(np.shape(xa))
xa_valid[np.isnan(xa)] = 0
# count number of valid neighbors
xa_valid_count = np.sum(xa_valid, axis=0)
num = np.zeros(data.shape)
for xarr in xa:
diff = data - xarr
# difference of NaNs will be converted to zero
# (to not affect the summation)
diff[np.isnan(diff)] = 0
# only those with valid values are considered in the summation
num += diff ** 2
# reinforce that NaN values should have NaN textures
num[np.isnan(data)] = np.nan
return np.sqrt(num / xa_valid_count)
def get_clustering(radar, vel_name: str = "VEL", phidp_name: str = "PHIDP", zdr_name: str = "ZDR"):
"""
Create cluster using a trained Gaussian Mixture Model (I use scikit-learn)
to cluster the radar data. Cluster 5 is clutter and 2 is noise. Cluster 1
correponds to a high gradient on PHIDP (folding), so it may corresponds to
either real data that fold or noise. A threshold on reflectivity should be
used on cluster 1.
Parameters:
===========
radar:
Py-ART radar structure.
vel_name: str
Velocity field name.
phidp_name: str
Name of the PHIDP field.
zdr_name: str
Name of the differential_reflectivity field.
Returns:
========
cluster: ndarray
Data ID using GMM (5: clutter, 2: noise, and 1: high-phidp gradient).
"""
# Load and deserialize GMM
location = os.path.dirname(os.path.realpath(__file__))
my_file = os.path.join(location, "data", "GM_model_CPOL.pkl.gz")
with gzip.GzipFile(my_file, "r") as gzid:
gmm = pickle.load(gzid)
df_orig = pd.DataFrame(
{
"VEL": texture(radar.fields[vel_name]["data"]).flatten(),
"PHIDP": texture(radar.fields[phidp_name]["data"]).flatten(),
"ZDR": texture(radar.fields[zdr_name]["data"]).flatten(),
}
)
df = df_orig.dropna()
pos_droped = df_orig.dropna().index
clusters = gmm.predict(df)
r = radar.range["data"]
time = radar.time["data"]
R, _ = np.meshgrid(r, time)
clus = np.zeros_like(R.flatten())
clus[pos_droped] = clusters + 1
cluster = clus.reshape(R.shape)
return cluster
def get_gatefilter_GMM(
radar, refl_name: str = "DBZ", vel_name: str = "VEL", phidp_name: str = "PHIDP", zdr_name: str = "ZDR"
):
"""
Filtering function adapted to CPOL before 2009 using ML Gaussian Mixture
Model. Function does 4 things:
1) Cutoff of the reflectivities below the noise level.
2) GMM using the texture of velocity, phidp and zdr.
3) Filtering using 1) and 2) results.
4) Removing temporary fields from the radar object.
Parameters:
===========
radar:
Py-ART radar structure.
refl_name: str
Reflectivity field name.
vel_name: str
Velocity field name.
phidp_name: str
Name of the PHIDP field.
zdr_name: str
Name of the differential_reflectivity field.
Returns:
========
gf: GateFilter
Gate filter (excluding all bad data).
"""
# GMM clustering (indpdt from cutoff)
cluster = get_clustering(radar, vel_name=vel_name, phidp_name=phidp_name, zdr_name=zdr_name)
radar.add_field_like(refl_name, "CLUS", cluster, replace_existing=True)
pos = (cluster == 1) & (radar.fields[refl_name]["data"] < 20)
radar.add_field_like(refl_name, "TPOS", pos, replace_existing=True)
# Using GMM results to filter.
gf = pyart.filters.GateFilter(radar)
gf.exclude_equal("CLUS", 5)
gf.exclude_equal("CLUS", 2)
gf.exclude_equal("TPOS", 1)
gf = pyart.correct.despeckle_field(radar, refl_name, gatefilter=gf)
# Removing temp keys.
for k in ["TPOS", "CLUS"]:
try:
radar.fields.pop(k)
except KeyError:
continue
return gf
def do_gatefilter_cpol(
radar,
refl_name: str = "DBZ",
phidp_name: str = "PHIDP",
rhohv_name: str = "RHOHV_CORR",
zdr_name: str = "ZDR",
snr_name: str = "SNR",
vel_name: str = "VEL",
):
"""
Filtering function adapted to CPOL.
Parameters:
===========
radar:
Py-ART radar structure.
refl_name: str
Reflectivity field name.
rhohv_name: str
Cross correlation ratio field name.
ncp_name: str
Name of the normalized_coherent_power field.
zdr_name: str
Name of the differential_reflectivity field.
Returns:
========
gf_despeckeld: GateFilter
Gate filter (excluding all bad data).
"""
radar_start_date = cftime.num2pydate(radar.time["data"][0], radar.time["units"])
# if radar_start_date.year < 2009:
gf = get_gatefilter_GMM(
radar, refl_name=refl_name, vel_name=vel_name, phidp_name=phidp_name, zdr_name=zdr_name,
)
# else:
# gf = pyart.filters.GateFilter(radar)
r = radar.range["data"]
azi = radar.azimuth["data"]
R, _ = np.meshgrid(r, azi)
# refl = radar.fields[refl_name]["data"].copy()
# fcut = 10 * np.log10(4e-5 * R)
# refl[refl < fcut] = np.NaN
# radar.add_field_like(refl_name, "NDBZ", refl)
# gf.exclude_invalid("NDBZ")
gf.exclude_below(snr_name, 9)
gf.exclude_outside(zdr_name, -3.0, 7.0)
gf.exclude_outside(refl_name, -20.0, 80.0)
# if radar_start_date.year > 2007:
# gf.exclude_below(rhohv_name, 0.7)
# else:
rhohv = radar.fields[rhohv_name]["data"]
pos = np.zeros_like(rhohv) + 1
pos[(R < 90e3) & (rhohv < 0.7)] = 0
radar.add_field_like(refl_name, "TMPRH", pos)
gf.exclude_equal("TMPRH", 0)
# Remove rings in march 1999.
if radar_start_date.year == 1999 and radar_start_date.month == 3:
radar.add_field_like(refl_name, "RRR", R)
gf.exclude_above("RRR", 140e3)
gf_despeckeld = pyart.correct.despeckle_field(radar, refl_name, gatefilter=gf)
# Remove temporary fields.
for k in ["NDBZ", "RRR", "TMPRH"]:
try:
radar.fields.pop(k)
except KeyError:
pass
return gf_despeckeld
def do_gatefilter(
radar,
refl_name: str = "DBZ",
phidp_name: str = "PHIDP",
rhohv_name: str = "RHOHV_CORR",
zdr_name: str = "ZDR",
snr_name: str = "SNR",
):
"""
Basic filtering function for dual-polarisation data.
Parameters:
===========
radar:
Py-ART radar structure.
refl_name: str
Reflectivity field name.
rhohv_name: str
Cross correlation ratio field name.
ncp_name: str
Name of the normalized_coherent_power field.
zdr_name: str
Name of the differential_reflectivity field.
Returns:
========
gf_despeckeld: GateFilter
Gate filter (excluding all bad data).
"""
# Initialize gatefilter
gf = pyart.filters.GateFilter(radar)
# Remove obviously wrong data.
gf.exclude_outside(zdr_name, -6.0, 7.0)
gf.exclude_outside(refl_name, -20.0, 80.0)
# Compute texture of PHIDP and remove noise.
dphi = texture(radar.fields[phidp_name]["data"])
radar.add_field_like(phidp_name, "PHITXT", dphi)
gf.exclude_above("PHITXT", 20)
gf.exclude_below(rhohv_name, 0.6)
# Despeckle
gf_despeckeld = pyart.correct.despeckle_field(radar, refl_name, gatefilter=gf)
try:
# Remove PHIDP texture
radar.fields.pop("PHITXT")
except Exception:
pass
return gf_despeckeld
| mit |
brev/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/texmanager.py | 69 | 16818 | """
This module supports embedded TeX expressions in matplotlib via dvipng
and dvips for the raster and postscript backends. The tex and
dvipng/dvips information is cached in ~/.matplotlib/tex.cache for reuse between
sessions
Requirements:
* latex
* \*Agg backends: dvipng
* PS backend: latex w/ psfrag, dvips, and Ghostscript 8.51
(older versions do not work properly)
Backends:
* \*Agg
* PS
* PDF
For raster output, you can get RGBA numpy arrays from TeX expressions
as follows::
texmanager = TexManager()
s = '\\TeX\\ is Number $\\displaystyle\\sum_{n=1}^\\infty\\frac{-e^{i\pi}}{2^n}$!'
Z = self.texmanager.get_rgba(s, size=12, dpi=80, rgb=(1,0,0))
To enable tex rendering of all text in your matplotlib figure, set
text.usetex in your matplotlibrc file (http://matplotlib.sf.net/matplotlibrc)
or include these two lines in your script::
from matplotlib import rc
rc('text', usetex=True)
"""
import copy, glob, os, shutil, sys, warnings
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
import distutils.version
import numpy as np
import matplotlib as mpl
from matplotlib import rcParams
from matplotlib._png import read_png
DEBUG = False
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
def dvipng_hack_alpha():
stdin, stdout = os.popen4('dvipng -version')
for line in stdout:
if line.startswith('dvipng '):
version = line.split()[-1]
mpl.verbose.report('Found dvipng version %s'% version,
'helpful')
version = distutils.version.LooseVersion(version)
return version < distutils.version.LooseVersion('1.6')
raise RuntimeError('Could not obtain dvipng version')
class TexManager:
"""
Convert strings to dvi files using TeX, caching the results to a
working dir
"""
oldpath = mpl.get_home()
if oldpath is None: oldpath = mpl.get_data_path()
oldcache = os.path.join(oldpath, '.tex.cache')
configdir = mpl.get_configdir()
texcache = os.path.join(configdir, 'tex.cache')
if os.path.exists(oldcache):
print >> sys.stderr, """\
WARNING: found a TeX cache dir in the deprecated location "%s".
Moving it to the new default location "%s"."""%(oldcache, texcache)
shutil.move(oldcache, texcache)
if not os.path.exists(texcache):
os.mkdir(texcache)
_dvipng_hack_alpha = dvipng_hack_alpha()
# mappable cache of
rgba_arrayd = {}
grey_arrayd = {}
postscriptd = {}
pscnt = 0
serif = ('cmr', '')
sans_serif = ('cmss', '')
monospace = ('cmtt', '')
cursive = ('pzc', r'\usepackage{chancery}')
font_family = 'serif'
font_families = ('serif', 'sans-serif', 'cursive', 'monospace')
font_info = {'new century schoolbook': ('pnc',
r'\renewcommand{\rmdefault}{pnc}'),
'bookman': ('pbk', r'\renewcommand{\rmdefault}{pbk}'),
'times': ('ptm', r'\usepackage{mathptmx}'),
'palatino': ('ppl', r'\usepackage{mathpazo}'),
'zapf chancery': ('pzc', r'\usepackage{chancery}'),
'cursive': ('pzc', r'\usepackage{chancery}'),
'charter': ('pch', r'\usepackage{charter}'),
'serif': ('cmr', ''),
'sans-serif': ('cmss', ''),
'helvetica': ('phv', r'\usepackage{helvet}'),
'avant garde': ('pag', r'\usepackage{avant}'),
'courier': ('pcr', r'\usepackage{courier}'),
'monospace': ('cmtt', ''),
'computer modern roman': ('cmr', ''),
'computer modern sans serif': ('cmss', ''),
'computer modern typewriter': ('cmtt', '')}
_rc_cache = None
_rc_cache_keys = ('text.latex.preamble', )\
+ tuple(['font.'+n for n in ('family', ) + font_families])
def __init__(self):
if not os.path.isdir(self.texcache):
os.mkdir(self.texcache)
ff = rcParams['font.family'].lower()
if ff in self.font_families:
self.font_family = ff
else:
mpl.verbose.report('The %s font family is not compatible with LaTeX. serif will be used by default.' % ff, 'helpful')
self.font_family = 'serif'
fontconfig = [self.font_family]
for font_family, font_family_attr in \
[(ff, ff.replace('-', '_')) for ff in self.font_families]:
for font in rcParams['font.'+font_family]:
if font.lower() in self.font_info:
found_font = self.font_info[font.lower()]
setattr(self, font_family_attr,
self.font_info[font.lower()])
if DEBUG:
print 'family: %s, font: %s, info: %s'%(font_family,
font, self.font_info[font.lower()])
break
else:
if DEBUG: print '%s font is not compatible with usetex' % font
else:
mpl.verbose.report('No LaTeX-compatible font found for the %s font family in rcParams. Using default.' % ff, 'helpful')
setattr(self, font_family_attr, self.font_info[font_family])
fontconfig.append(getattr(self, font_family_attr)[0])
self._fontconfig = ''.join(fontconfig)
# The following packages and commands need to be included in the latex
# file's preamble:
cmd = [self.serif[1], self.sans_serif[1], self.monospace[1]]
if self.font_family == 'cursive': cmd.append(self.cursive[1])
while r'\usepackage{type1cm}' in cmd:
cmd.remove(r'\usepackage{type1cm}')
cmd = '\n'.join(cmd)
self._font_preamble = '\n'.join([r'\usepackage{type1cm}', cmd,
r'\usepackage{textcomp}'])
def get_basefile(self, tex, fontsize, dpi=None):
"""
returns a filename based on a hash of the string, fontsize, and dpi
"""
s = ''.join([tex, self.get_font_config(), '%f'%fontsize,
self.get_custom_preamble(), str(dpi or '')])
# make sure hash is consistent for all strings, regardless of encoding:
bytes = unicode(s).encode('utf-8')
return os.path.join(self.texcache, md5(bytes).hexdigest())
def get_font_config(self):
"""Reinitializes self if relevant rcParams on have changed."""
if self._rc_cache is None:
self._rc_cache = dict([(k,None) for k in self._rc_cache_keys])
changed = [par for par in self._rc_cache_keys if rcParams[par] != \
self._rc_cache[par]]
if changed:
if DEBUG: print 'DEBUG following keys changed:', changed
for k in changed:
if DEBUG:
print 'DEBUG %-20s: %-10s -> %-10s' % \
(k, self._rc_cache[k], rcParams[k])
# deepcopy may not be necessary, but feels more future-proof
self._rc_cache[k] = copy.deepcopy(rcParams[k])
if DEBUG: print 'DEBUG RE-INIT\nold fontconfig:', self._fontconfig
self.__init__()
if DEBUG: print 'DEBUG fontconfig:', self._fontconfig
return self._fontconfig
def get_font_preamble(self):
"""
returns a string containing font configuration for the tex preamble
"""
return self._font_preamble
def get_custom_preamble(self):
"""returns a string containing user additions to the tex preamble"""
return '\n'.join(rcParams['text.latex.preamble'])
def _get_shell_cmd(self, *args):
"""
On windows, changing directories can be complicated by the presence of
multiple drives. get_shell_cmd deals with this issue.
"""
if sys.platform == 'win32':
command = ['%s'% os.path.splitdrive(self.texcache)[0]]
else:
command = []
command.extend(args)
return ' && '.join(command)
def make_tex(self, tex, fontsize):
"""
Generate a tex file to render the tex string at a specific font size
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
texfile = '%s.tex'%basefile
fh = file(texfile, 'w')
custom_preamble = self.get_custom_preamble()
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(self.font_family,
r'{\rmfamily %s}')
tex = fontcmd % tex
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[papersize={72in,72in}, body={70in,70in}, margin={1in,1in}]{geometry}
\pagestyle{empty}
\begin{document}
\fontsize{%f}{%f}%s
\end{document}
""" % (self._font_preamble, unicode_preamble, custom_preamble,
fontsize, fontsize*1.25, tex)
if rcParams['text.latex.unicode']:
fh.write(s.encode('utf8'))
else:
try:
fh.write(s)
except UnicodeEncodeError, err:
mpl.verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
fh.close()
return texfile
def make_dvi(self, tex, fontsize):
"""
generates a dvi file containing latex's layout of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
dvifile = '%s.dvi'% basefile
if DEBUG or not os.path.exists(dvifile):
texfile = self.make_tex(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'latex -interaction=nonstopmode %s > "%s"'\
%(os.path.split(texfile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No latex error report available.'
if exit_status:
raise RuntimeError(('LaTeX was not able to process the following \
string:\n%s\nHere is the full report generated by LaTeX: \n\n'% repr(tex)) + report)
else: mpl.verbose.report(report, 'debug')
for fname in glob.glob(basefile+'*'):
if fname.endswith('dvi'): pass
elif fname.endswith('tex'): pass
else:
try: os.remove(fname)
except OSError: pass
return dvifile
def make_png(self, tex, fontsize, dpi):
"""
generates a png file containing latex's rendering of tex string
returns the filename
"""
basefile = self.get_basefile(tex, fontsize, dpi)
pngfile = '%s.png'% basefile
# see get_rgba for a discussion of the background
if DEBUG or not os.path.exists(pngfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"' % self.texcache,
'dvipng -bg Transparent -D %s -T tight -o \
"%s" "%s" > "%s"'%(dpi, os.path.split(pngfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
try:
fh = file(outfile)
report = fh.read()
fh.close()
except IOError:
report = 'No dvipng error report available.'
if exit_status:
raise RuntimeError('dvipng was not able to \
process the following file:\n%s\nHere is the full report generated by dvipng: \
\n\n'% dvifile + report)
else: mpl.verbose.report(report, 'debug')
try: os.remove(outfile)
except OSError: pass
return pngfile
def make_ps(self, tex, fontsize):
"""
generates a postscript file containing latex's rendering of tex string
returns the file name
"""
basefile = self.get_basefile(tex, fontsize)
psfile = '%s.epsf'% basefile
if DEBUG or not os.path.exists(psfile):
dvifile = self.make_dvi(tex, fontsize)
outfile = basefile+'.output'
command = self._get_shell_cmd('cd "%s"'% self.texcache,
'dvips -q -E -o "%s" "%s" > "%s"'\
%(os.path.split(psfile)[-1],
os.path.split(dvifile)[-1], outfile))
mpl.verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: mpl.verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
return psfile
def get_ps_bbox(self, tex, fontsize):
"""
returns a list containing the postscript bounding box for latex's
rendering of the tex string
"""
psfile = self.make_ps(tex, fontsize)
ps = file(psfile)
for line in ps:
if line.startswith('%%BoundingBox:'):
return [int(val) for val in line.split()[1:]]
raise RuntimeError('Could not parse %s'%psfile)
def get_grey(self, tex, fontsize=None, dpi=None):
"""returns the alpha channel"""
key = tex, self.get_font_config(), fontsize, dpi
alpha = self.grey_arrayd.get(key)
if alpha is None:
pngfile = self.make_png(tex, fontsize, dpi)
X = read_png(os.path.join(self.texcache, pngfile))
if rcParams['text.dvipnghack'] is not None:
hack = rcParams['text.dvipnghack']
else:
hack = self._dvipng_hack_alpha
if hack:
# hack the alpha channel
# dvipng assumed a constant background, whereas we want to
# overlay these rasters with antialiasing over arbitrary
# backgrounds that may have other figure elements under them.
# When you set dvipng -bg Transparent, it actually makes the
# alpha channel 1 and does the background compositing and
# antialiasing itself and puts the blended data in the rgb
# channels. So what we do is extract the alpha information
# from the red channel, which is a blend of the default dvipng
# background (white) and foreground (black). So the amount of
# red (or green or blue for that matter since white and black
# blend to a grayscale) is the alpha intensity. Once we
# extract the correct alpha information, we assign it to the
# alpha channel properly and let the users pick their rgb. In
# this way, we can overlay tex strings on arbitrary
# backgrounds with antialiasing
#
# red = alpha*red_foreground + (1-alpha)*red_background
#
# Since the foreground is black (0) and the background is
# white (1) this reduces to red = 1-alpha or alpha = 1-red
#alpha = npy.sqrt(1-X[:,:,0]) # should this be sqrt here?
alpha = 1-X[:,:,0]
else:
alpha = X[:,:,-1]
self.grey_arrayd[key] = alpha
return alpha
def get_rgba(self, tex, fontsize=None, dpi=None, rgb=(0,0,0)):
"""
Returns latex's rendering of the tex string as an rgba array
"""
if not fontsize: fontsize = rcParams['font.size']
if not dpi: dpi = rcParams['savefig.dpi']
r,g,b = rgb
key = tex, self.get_font_config(), fontsize, dpi, tuple(rgb)
Z = self.rgba_arrayd.get(key)
if Z is None:
alpha = self.get_grey(tex, fontsize, dpi)
Z = np.zeros((alpha.shape[0], alpha.shape[1], 4), np.float)
Z[:,:,0] = r
Z[:,:,1] = g
Z[:,:,2] = b
Z[:,:,3] = alpha
self.rgba_arrayd[key] = Z
return Z
| agpl-3.0 |
MaxInGaussian/SCFGP | SCFGP/SCFGP.py | 1 | 13766 | ################################################################################
# SCFGP: Sparsely Correlated Fourier Features Based Gaussian Process
# Github: https://github.com/MaxInGaussian/SCFGP
# Author: Max W. Y. Lam (maxingaussian@gmail.com)
################################################################################
import sys, os, string, time
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
from theano import config as Tc, shared as Ts, function as Tf, tensor as TT
from theano.sandbox import linalg as Tlin
from .Scaler import Scaler
from .Optimizer import Optimizer as OPT
Tc.mode = 'FAST_RUN'
Tc.optimizer = 'fast_run'
Tc.reoptimize_unpickled_function = False
class SCFGP(object):
"""
Sparsely Correlated Fourier Features Based Gaussian Process
"""
ID, NAME, verbose = "", "", True
X_scaler, y_scaler = [None]*2
M, N, D = -1, -1, -1
X, y, hyper, Li, alpha, train_func, pred_func = [None]*7
def __init__(self, sparsity=20, nfeats=18, evals=None,
X_scaling_method='auto-inv-normal',
y_scaling_method='auto-normal', verbose=False):
self.S = sparsity
self.M = nfeats
self.X_scaler = Scaler(X_scaling_method)
self.y_scaler = Scaler(y_scaling_method)
self.evals = {
"SCORE": ["Model Selection Score", []],
"COST": ["Hyperparameter Selection Cost", []],
"MAE": ["Mean Absolute Error", []],
"NMAE": ["Normalized Mean Absolute Error", []],
"MSE": ["Mean Square Error", []],
"NMSE": ["Normalized Mean Square Error", []],
"MNLP": ["Mean Negative Log Probability", []],
"TIME(s)": ["Training Time", []],
} if evals is None else evals
self.verbose = verbose
self.generate_ID()
def message(self, *arg):
if(self.verbose):
print(" ".join(map(str, arg)))
sys.stdout.flush()
def generate_ID(self):
self.ID = ''.join(
chr(npr.choice([ord(c) for c in (
string.ascii_uppercase+string.digits)])) for _ in range(5))
self.NAME = "SCFGP (Sparsity=%d, Fourier Features=%d)"%(self.S, self.M)
def init_params(self):
a = npr.randn(1)
b = npr.randn(1)
c = npr.randn(1)
l_f = npr.randn(self.D*self.S)
r_f = npr.rand(self.M*self.S)
l_p = 2*np.pi*npr.rand(self.S)
p = 2*np.pi*npr.rand(self.M)
self.params = Ts(np.concatenate([a, b, c, l_f, r_f, l_p, p]))
def unpack_params(self, hyper):
t_ind = 0
a = hyper[0];t_ind+=1
b = hyper[1];t_ind+=1
c = hyper[2];t_ind+=1
l_f = hyper[t_ind:t_ind+self.D*self.S];t_ind+=self.D*self.S
l_F = TT.reshape(l_f, (self.D, self.S))
r_f = hyper[t_ind:t_ind+self.M*self.S];t_ind+=self.M*self.S
r_F = TT.reshape(r_f, (self.M, self.S))
F = l_F.dot(r_F.T)
l_p = hyper[t_ind:t_ind+self.S];t_ind+=self.S
l_P = TT.reshape(l_p, (1, self.S))
p = hyper[t_ind:t_ind+self.M];t_ind+=self.M
P = TT.reshape(p, (1, self.M))
l_FC = l_P-TT.mean(l_F, 0)[None, :]
FC = P-TT.mean(F, 0)[None, :]
return a, b, c, l_F, F, l_FC, FC
def build_theano_models(self, algo, algo_params):
epsilon = 1e-6
kl = lambda mu, sig: sig+mu**2-TT.log(sig)
X, y = TT.dmatrices('X', 'y')
params = TT.dvector('params')
a, b, c, l_F, F, l_FC, FC = self.unpack_params(params)
sig2_n, sig_f = TT.exp(2*a), TT.exp(b)
l_FF = TT.dot(X, l_F)+l_FC
FF = TT.concatenate((l_FF, TT.dot(X, F)+FC), 1)
Phi = TT.concatenate((TT.cos(FF), TT.sin(FF)), 1)
Phi = sig_f*TT.sqrt(2./self.M)*Phi
noise = TT.log(1+TT.exp(c))
PhiTPhi = TT.dot(Phi.T, Phi)
A = PhiTPhi+(sig2_n+epsilon)*TT.identity_like(PhiTPhi)
L = Tlin.cholesky(A)
Li = Tlin.matrix_inverse(L)
PhiTy = Phi.T.dot(y)
beta = TT.dot(Li, PhiTy)
alpha = TT.dot(Li.T, beta)
mu_f = TT.dot(Phi, alpha)
var_f = (TT.dot(Phi, Li.T)**2).sum(1)[:, None]
dsp = noise*(var_f+1)
mu_l = TT.sum(TT.mean(l_F, axis=1))
sig_l = TT.sum(TT.std(l_F, axis=1))
mu_w = TT.sum(TT.mean(F, axis=1))
sig_w = TT.sum(TT.std(F, axis=1))
hermgauss = np.polynomial.hermite.hermgauss(30)
herm_x = Ts(hermgauss[0])[None, None, :]
herm_w = Ts(hermgauss[1]/np.sqrt(np.pi))[None, None, :]
herm_f = TT.sqrt(2*var_f[:, :, None])*herm_x+mu_f[:, :, None]
nlk = (0.5*herm_f**2.-y[:, :, None]*herm_f)/dsp[:, :, None]+0.5*(
TT.log(2*np.pi*dsp[:, :, None])+y[:, :, None]**2/dsp[:, :, None])
enll = herm_w*nlk
nlml = 2*TT.log(TT.diagonal(L)).sum()+2*enll.sum()+1./sig2_n*(
(y**2).sum()-(beta**2).sum())+2*(X.shape[0]-self.M)*a
penelty = (kl(mu_w, sig_w)*self.M+kl(mu_l, sig_l)*self.S)/(self.S+self.M)
cost = (nlml+penelty)/X.shape[0]
grads = TT.grad(cost, params)
updates = getattr(OPT, algo)(self.params, grads, **algo_params)
updates = getattr(OPT, 'apply_nesterov_momentum')(updates, momentum=0.9)
train_inputs = [X, y]
train_outputs = [cost, alpha, Li]
self.train_func = Tf(train_inputs, train_outputs,
givens=[(params, self.params)])
self.train_iter_func = Tf(train_inputs, train_outputs,
givens=[(params, self.params)], updates=updates)
Xs, Li, alpha = TT.dmatrices('Xs', 'Li', 'alpha')
l_FFs = TT.dot(Xs, l_F)+l_FC
FFs = TT.concatenate((l_FFs, TT.dot(Xs, F)+FC), 1)
Phis = TT.concatenate((TT.cos(FFs), TT.sin(FFs)), 1)
Phis = sig_f*TT.sqrt(2./self.M)*Phis
mu_pred = TT.dot(Phis, alpha)
std_pred = (noise*(1+(TT.dot(Phis, Li.T)**2).sum(1)))**0.5
pred_inputs = [Xs, alpha, Li]
pred_outputs = [mu_pred, std_pred]
self.pred_func = Tf(pred_inputs, pred_outputs,
givens=[(params, self.params)])
def get_compiled_funcs(self):
return self.train_func, self.train_iter_func, self.pred_func
def set_data(self, X, y):
"""
X: Normally Distributed Inputs
Y: Normally Distributed Outputs
"""
self.message("-"*60, "\nNormalizing SCFGP training data...")
self.X_scaler.fit(X)
self.y_scaler.fit(y)
self.X = self.X_scaler.forward_transform(X)
self.y = self.y_scaler.forward_transform(y)
self.message("done.")
self.N, self.D = self.X.shape
if('train_func' not in self.__dict__.keys()):
self.message("-"*60, "\nInitializing SCFGP hyperparameters...")
self.init_params()
self.message("done.")
else:
cost, self.alpha, self.Li = self.train_func(self.X, self.y)
def minibatches(self, X, y, batchsize, shuffle=True):
assert len(X) == len(y)
if(shuffle):
inds = np.arange(len(X))
np.random.shuffle(inds)
for start_ind in range(0, len(X)-batchsize+1, batchsize):
if shuffle:
batch = inds[start_ind:start_ind+batchsize]
else:
batch = slice(start_ind, start_ind+batchsize)
yield X[batch], y[batch]
def optimize(self, Xv=None, yv=None, funcs=None, visualizer=None, **args):
obj = 'COST' if 'obj' not in args.keys() else args['obj'].upper()
obj = 'COST' if obj not in self.evals.keys() else obj
algo = {'algo': None} if 'algo' not in args.keys() else args['algo']
nbatches = 1 if 'nbatches' not in args.keys() else args['nbatches']
batchsize = 150 if 'batchsize' not in args.keys() else args['batchsize']
cvrg_tol = 1e-4 if 'cvrg_tol' not in args.keys() else args['cvrg_tol']
max_cvrg = 18 if 'max_cvrg' not in args.keys() else args['max_cvrg']
max_iter = 500 if 'max_iter' not in args.keys() else args['max_iter']
if(algo['algo'] not in OPT.algos):
algo = {
'algo': 'adam',
'algo_params': {
'learning_rate':0.01,
'beta1':0.9,
'beta2':0.999,
'epsilon':1e-8
}
}
for metric in self.evals.keys():
self.evals[metric][1] = []
if(funcs is None):
self.message("-"*50, "\nCompiling SCFGP theano model...")
self.build_theano_models(algo['algo'], algo['algo_params'])
self.message("done.")
else:
self.train_func, self.train_iter_func, self.pred_func = funcs
if(visualizer is not None):
visualizer.model = self
animate = visualizer.train_with_plot()
if(Xv is None or yv is None):
obj = 'COST'
self.evals['MAE'][1].append(0)
self.evals['NMAE'][1].append(0)
self.evals['MSE'][1].append(0)
self.evals['NMSE'][1].append(0)
self.evals['MNLP'][1].append(0)
self.evals['SCORE'][1].append(0)
self.min_obj_ind = 0
train_start_time = time.time()
min_obj_val, argmin_params, cvrg_iter = np.Infinity, self.params, 0
for iter in range(max_iter):
if(nbatches > 1):
cost_sum, params_list, batch_count = 0, [], 0
for X, y in self.minibatches(self.X, self.y, batchsize):
params_list.append(self.params.get_value())
cost, self.alpha, self.Li = self.train_iter_func(X, y)
cost_sum += cost;batch_count += 1
if(batch_count == nbatches):
break
self.params = Ts(np.median(np.array(params_list), axis=0))
self.evals['COST'][1].append(np.double(cost_sum/batch_count))
else:
cost, self.alpha, self.Li = self.train_iter_func(self.X, self.y)
self.evals['COST'][1].append(cost)
self.evals['TIME(s)'][1].append(time.time()-train_start_time)
if(Xv is not None and yv is not None):
self.predict(Xv, yv)
if(iter%(max_iter//10) == 1):
self.message("-"*17, "VALIDATION ITERATION", iter, "-"*17)
self._print_current_evals()
if(visualizer is not None):
animate(iter)
plt.pause(0.05)
obj_val = self.evals[obj][1][-1]
if(obj_val < min_obj_val):
if(min_obj_val-obj_val < cvrg_tol):
cvrg_iter += 1
else:
cvrg_iter = 0
min_obj_val = obj_val
self.min_obj_ind = len(self.evals['COST'][1])-1
argmin_params = self.params.copy()
else:
cvrg_iter += 1
if(iter > 30 and cvrg_iter > max_cvrg):
break
elif(cvrg_iter > max_cvrg*0.5):
randp = np.random.rand()*cvrg_iter/max_cvrg*0.5
self.params = (1-randp)*self.params+randp*argmin_params
self.params = argmin_params.copy()
cost, self.alpha, self.Li = self.train_func(self.X, self.y)
self.evals['COST'][1].append(np.double(cost))
self.evals['TIME(s)'][1].append(time.time()-train_start_time)
if(Xv is not None and yv is not None):
self.predict(Xv, yv)
self.min_obj_ind = len(self.evals['COST'][1])-1
disp = self.verbose
self.verbose = True
self.message("-"*19, "OPTIMIZATION RESULT", "-"*20)
self._print_current_evals()
self.message("-"*60)
self.verbose = disp
def predict(self, Xs, ys=None):
self.Xs = self.X_scaler.forward_transform(Xs)
mu_f, std_f = self.pred_func(self.Xs, self.alpha, self.Li)
mu_y = self.y_scaler.backward_transform(mu_f)
up_bnd_y = self.y_scaler.backward_transform(mu_f+std_f[:, None])
dn_bnd_y = self.y_scaler.backward_transform(mu_f-std_f[:, None])
std_y = 0.5*(up_bnd_y-dn_bnd_y)
if(ys is not None):
self.evals['MAE'][1].append(np.mean(np.abs(mu_y-ys)))
self.evals['NMAE'][1].append(self.evals['MAE'][1][-1]/np.std(ys))
self.evals['MSE'][1].append(np.mean((mu_y-ys)**2.))
self.evals['NMSE'][1].append(self.evals['MSE'][1][-1]/np.var(ys))
self.evals['MNLP'][1].append(0.5*np.mean(((
ys-mu_y)/std_y)**2+np.log(2*np.pi*std_y**2)))
self.evals['SCORE'][1].append(
self.evals['NMSE'][1][-1]/(1+np.exp(-self.evals['MNLP'][1][-1])))
return mu_y, std_y
def save(self, path):
import pickle
save_vars = ['ID', 'M', 'X_scaler', 'y_scaler', 'train_func',
'pred_func', 'params', 'alpha', 'Li', 'evals']
save_dict = {varn: self.__dict__[varn] for varn in save_vars}
with open(path, "wb") as save_f:
pickle.dump(save_dict, save_f, pickle.HIGHEST_PROTOCOL)
def load(self, path):
import pickle
with open(path, "rb") as load_f:
load_dict = pickle.load(load_f)
for varn, var in load_dict.items():
self.__dict__[varn] = var
self.NAME = "SCFGP (Sparsity=%d, Fourier Features=%d)"%(self.S, self.M)
def _print_current_evals(self):
for metric in sorted(self.evals.keys()):
if(len(self.evals[metric][1]) < len(self.evals['COST'][1])):
continue
best_perform_eval = self.evals[metric][1][self.min_obj_ind]
self.message(self.NAME, "%7s = %.4e"%(metric, best_perform_eval))
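# A minimal usage sketch of the SCFGP model defined above. The constructor and
# its arguments (e.g. the sparsity S and number of Fourier features M) are
# defined elsewhere in this repo and are assumed here; only methods shown
# above (set_data, optimize, predict, save) are used.
#
#   import numpy as np
#   model = SCFGP(...)                    # hypothetical construction; S and M are fixed here
#   X, y = np.random.randn(100, 3), np.random.randn(100, 1)
#   model.set_data(X, y)                  # scales the data and initializes hyperparameters
#   model.optimize(max_iter=200)          # compiles the Theano graphs and runs training
#   mu, std = model.predict(X)            # predictive mean and standard deviation
#   model.save("scfgp_model.pkl")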
| bsd-3-clause |
finfou/tushare | tushare/stock/trading.py | 1 | 23685 | # -*- coding:utf-8 -*-
"""
Trading data API
Created on 2014/07/31
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
from __future__ import division
import time
import json
import lxml.html
from lxml import etree
import pandas as pd
import numpy as np
from tushare.stock import cons as ct
import re
from pandas.compat import StringIO
from tushare.util import dateu as du
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_hist_data(code=None, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
    Get historical trading records for an individual stock.
    Parameters
    ------
      code:string
                  stock code, e.g. 600848
      start:string
                  start date, format:YYYY-MM-DD; if empty, the earliest date available from the API is used
      end:string
                  end date, format:YYYY-MM-DD; if empty, data up to the most recent trading day is returned
      ktype:string
                  data type: D=daily, W=weekly, M=monthly, 5=5-min, 15=15-min, 30=30-min, 60=60-min; default is D
      retry_count : int, default 3
                 number of retries in case of network problems
      pause : int, default 0
                seconds to pause between retries, to avoid issues caused by requesting too frequently
    return
    -------
      DataFrame
          columns: date, open, high, close, low, volume, price change, pct change, 5/10/20-day mean price, 5/10/20-day mean volume, turnover rate
"""
symbol = _code_to_symbol(code)
url = ''
if ktype.upper() in ct.K_LABELS:
url = ct.DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
ct.K_TYPE[ktype.upper()], symbol)
elif ktype in ct.K_MIN_LABELS:
url = ct.DAY_PRICE_MIN_URL%(ct.P_TYPE['http'], ct.DOMAINS['ifeng'],
symbol, ktype)
else:
raise TypeError('ktype input error.')
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
lines = urlopen(request, timeout = 10).read()
if len(lines) < 15: #no data
return None
except Exception as e:
print(e)
else:
js = json.loads(lines.decode('utf-8') if ct.PY3 else lines)
cols = []
if (code in ct.INDEX_LABELS) & (ktype.upper() in ct.K_LABELS):
cols = ct.INX_DAY_PRICE_COLUMNS
else:
cols = ct.DAY_PRICE_COLUMNS
if len(js['record'][0]) == 14:
cols = ct.INX_DAY_PRICE_COLUMNS
df = pd.DataFrame(js['record'], columns=cols)
if ktype.upper() in ['D', 'W', 'M']:
df = df.applymap(lambda x: x.replace(u',', u''))
df[df==''] = 0
for col in cols[1:]:
df[col] = df[col].astype(float)
if start is not None:
df = df[df.date >= start]
if end is not None:
df = df[df.date <= end]
if (code in ct.INDEX_LABELS) & (ktype in ct.K_MIN_LABELS):
df = df.drop('turnover', axis=1)
df = df.set_index('date')
df = df.sort_index(ascending = False)
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
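# Usage sketch for get_hist_data (the stock code and dates are illustrative;
# a network connection to the quote API is required):
#
#   from tushare.stock import trading
#   df = trading.get_hist_data('600848', start='2015-01-05', end='2015-01-30')  # daily bars
#   df5 = trading.get_hist_data('600848', ktype='5')                            # 5-minute bars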
def _parsing_dayprice_json(pageNum=1):
"""
    Parse one page of the current day's quotes (JSON format).
    Parameters
    ------
      pageNum: page number
    return
    -------
      DataFrame with the day's trading data for all stocks on that page
"""
ct._write_console()
request = Request(ct.SINA_DAY_PRICE_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], pageNum))
text = urlopen(request, timeout=10).read()
if text == 'null':
return None
reg = re.compile(r'\,(.*?)\:')
text = reg.sub(r',"\1":', text.decode('gbk') if ct.PY3 else text)
text = text.replace('"{symbol', '{"symbol')
text = text.replace('{symbol', '{"symbol"')
if ct.PY3:
jstr = json.dumps(text)
else:
jstr = json.dumps(text, encoding='GBK')
js = json.loads(jstr)
df = pd.DataFrame(pd.read_json(js, dtype={'code':object}),
columns=ct.DAY_TRADING_COLUMNS)
df = df.drop('symbol', axis=1)
df = df.ix[df.volume > 0]
return df
def get_tick_data(code=None, date=None, retry_count=3, pause=0.001):
"""
    Get tick-by-tick trade data.
    Parameters
    ------
        code:string
                  stock code, e.g. 600848
        date:string
                  date, format:YYYY-MM-DD
        retry_count : int, default 3
                  number of retries in case of network problems
        pause : int, default 0
                 seconds to pause between retries, to avoid issues caused by requesting too frequently
     return
     -------
        DataFrame with the tick data of the given stock for the given date
              columns: trade time, price, price change, volume (lots), amount (CNY), trade type (buy/sell/neutral)
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
for _ in range(retry_count):
time.sleep(pause)
try:
re = Request(ct.TICK_PRICE_URL % (ct.P_TYPE['http'], ct.DOMAINS['sf'], ct.PAGES['dl'],
date, symbol))
lines = urlopen(re, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 100:
return None
df = pd.read_table(StringIO(lines), names=ct.TICK_COLUMNS,
skiprows=[0])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_sina_dd(code=None, date=None, retry_count=3, pause=0.001):
"""
    Get Sina large-order ("big deal") data.
    Parameters
    ------
        code:string
                  stock code, e.g. 600848
        date:string
                  date, format:YYYY-MM-DD
        retry_count : int, default 3
                  number of retries in case of network problems
        pause : int, default 0
                 seconds to pause between retries, to avoid issues caused by requesting too frequently
     return
     -------
        DataFrame with the large-order data of the given stock for the given date
              columns: stock code, stock name, trade time, price, volume, previous price, type (buy/sell/neutral)
"""
if code is None or len(code)!=6 or date is None:
return None
symbol = _code_to_symbol(code)
for _ in range(retry_count):
time.sleep(pause)
try:
re = Request(ct.SINA_DD % (ct.P_TYPE['http'], ct.DOMAINS['vsf'], ct.PAGES['sinadd'],
symbol, date))
lines = urlopen(re, timeout=10).read()
lines = lines.decode('GBK')
if len(lines) < 100:
return None
df = pd.read_csv(StringIO(lines), names=ct.SINA_DD_COLS,
skiprows=[0])
if df is not None:
df['code'] = df['code'].map(lambda x: x[2:])
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_ticks(code=None, retry_count=3, pause=0.001):
"""
    Get the current day's tick-by-tick trade details.
    Parameters
    ------
        code:string
                  stock code, e.g. 600848
        retry_count : int, default 3
                  number of retries in case of network problems
        pause : int, default 0
                 seconds to pause between retries, to avoid issues caused by requesting too frequently
     return
     -------
        DataFrame with today's tick data of the given stock
              columns: trade time, price, price change, volume (lots), amount (CNY), trade type (buy/sell/neutral)
"""
if code is None or len(code)!=6 :
return None
symbol = _code_to_symbol(code)
date = du.today()
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(ct.TODAY_TICKS_PAGE_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['jv'], date,
symbol))
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str[1:-1]
data_str = eval(data_str, type('Dummy', (dict,),
dict(__getitem__ = lambda s, n:n))())
data_str = json.dumps(data_str)
data_str = json.loads(data_str)
pages = len(data_str['detailPages'])
data = pd.DataFrame()
ct._write_head()
for pNo in range(1, pages+1):
data = data.append(_today_ticks(symbol, date, pNo,
retry_count, pause), ignore_index=True)
except Exception as er:
print(str(er))
else:
return data
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def _today_ticks(symbol, tdate, pageNo, retry_count, pause):
ct._write_console()
for _ in range(retry_count):
time.sleep(pause)
try:
html = lxml.html.parse(ct.TODAY_TICKS_URL % (ct.P_TYPE['http'],
ct.DOMAINS['vsf'], ct.PAGES['t_ticks'],
symbol, tdate, pageNo
))
res = html.xpath('//table[@id=\"datatbl\"]/tbody/tr')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
sarr = '<table>%s</table>'%sarr
sarr = sarr.replace('--', '0')
df = pd.read_html(StringIO(sarr), parse_dates=False)[0]
df.columns = ct.TODAY_TICK_COLUMNS
df['pchange'] = df['pchange'].map(lambda x : x.replace('%', ''))
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_today_all():
"""
        Get trading data of all stocks for the most recent trading day in a single call.
    return
    -------
      DataFrame
           columns: code, name, pct change, current price, open, high, low, previous close, volume, turnover rate
"""
ct._write_head()
df = _parsing_dayprice_json(1)
if df is not None:
for i in range(2, ct.PAGE_NUM[0]):
newdf = _parsing_dayprice_json(i)
df = df.append(newdf, ignore_index=True)
return df
def get_realtime_quotes(symbols=None):
"""
    Get real-time quotes data.
    Useful for tracking trading activity (result of this call vs. the previous call).
    Parameters
    ------
        symbols : string, array-like object (list, tuple, Series).
    return
    -------
        DataFrame of real-time quotes with columns:
            0: name, stock name
            1: open, today's opening price
            2: pre_close, previous closing price
            3: price, current price
            4: high, today's high
            5: low, today's low
            6: bid, best bid price ("buy one")
            7: ask, best ask price ("sell one")
            8: volume, traded volume (you may need volume/100)
            9: amount, traded amount (CNY)
            10: b1_v, bid-1 volume
            11: b1_p, bid-1 price
            12: b2_v, bid-2 volume
            13: b2_p, bid-2 price
            14: b3_v, bid-3 volume
            15: b3_p, bid-3 price
            16: b4_v, bid-4 volume
            17: b4_p, bid-4 price
            18: b5_v, bid-5 volume
            19: b5_p, bid-5 price
            20: a1_v, ask-1 volume
            21: a1_p, ask-1 price
            ...
            30: date, date
            31: time, time
"""
symbols_list = ''
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for code in symbols:
symbols_list += _code_to_symbol(code) + ','
else:
symbols_list = _code_to_symbol(symbols)
symbols_list = symbols_list[:-1] if len(symbols_list) > 8 else symbols_list
request = Request(ct.LIVE_DATA_URL%(ct.P_TYPE['http'], ct.DOMAINS['sinahq'],
_random(), symbols_list))
text = urlopen(request,timeout=10).read()
text = text.decode('GBK')
reg = re.compile(r'\="(.*?)\";')
data = reg.findall(text)
regSym = re.compile(r'(?:sh|sz)(.*?)\=')
syms = regSym.findall(text)
data_list = []
syms_list = []
for index, row in enumerate(data):
if len(row)>1:
data_list.append([astr for astr in row.split(',')])
syms_list.append(syms[index])
if len(syms_list) == 0:
return None
df = pd.DataFrame(data_list, columns=ct.LIVE_DATA_COLS)
df = df.drop('s', axis=1)
df['code'] = syms_list
ls = [cls for cls in df.columns if '_v' in cls]
for txt in ls:
df[txt] = df[txt].map(lambda x : x[:-2])
return df
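# Usage sketch for get_realtime_quotes (codes are illustrative; requires
# network access to the Sina quote service):
#
#   df = get_realtime_quotes('000581')                        # single stock code
#   df = get_realtime_quotes(['600848', '000980', '000981'])  # list/tuple/Series also accepted
#   print(df[['code', 'name', 'price']])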
def get_h_data(code, start=None, end=None, autype='qfq',
index=False, retry_count=3, pause=0.001, drop_factor=True):
'''
    Get historical price data adjusted for corporate actions.
    Parameters
    ------
      code:string
                  stock code, e.g. 600848
      start:string
                  start date, format:YYYY-MM-DD; if empty, defaults to one year before today
      end:string
                  end date, format:YYYY-MM-DD; if empty, defaults to today
      autype:string
                  adjustment type: qfq = forward-adjusted, hfq = backward-adjusted, None = unadjusted; default is qfq
      retry_count : int, default 3
                 number of retries in case of network problems
      pause : int, default 0
                seconds to pause between retries, to avoid issues caused by requesting too frequently
      drop_factor : bool, default True
                whether to drop the adjustment factor column; it is rarely needed for direct analysis, but keeping it is more flexible if the data is stored in a database and analyzed later
    return
    -------
      DataFrame
          date    trading date (index)
          open    opening price
          high    highest price
          close   closing price
          low     lowest price
          volume  traded volume
          amount  traded amount
'''
start = du.today_last_year() if start is None else start
end = du.today() if end is None else end
qs = du.get_quarts(start, end)
qt = qs[0]
ct._write_head()
data = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
if len(qs)>1:
for d in range(1, len(qs)):
qt = qs[d]
ct._write_console()
df = _parse_fq_data(_get_index_url(index, code, qt), index,
retry_count, pause)
data = data.append(df, ignore_index=True)
if len(data) == 0 or len(data[(data.date>=start)&(data.date<=end)]) == 0:
return None
data = data.drop_duplicates('date')
if index:
data = data[(data.date>=start) & (data.date<=end)]
data = data.set_index('date')
data = data.sort_index(ascending=False)
return data
if autype == 'hfq':
if drop_factor:
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
if autype == 'qfq':
if drop_factor:
data = data.drop('factor', axis=1)
df = _parase_fq_factor(code, start, end)
df = df.drop_duplicates('date')
df = df.sort('date', ascending=False)
frow = df.head(1)
rt = get_realtime_quotes(code)
if rt is None:
return None
if ((float(rt['high']) == 0) & (float(rt['low']) == 0)):
preClose = float(rt['pre_close'])
else:
if du.is_holiday(du.today()):
preClose = float(rt['price'])
else:
if (du.get_hour() > 9) & (du.get_hour() < 18):
preClose = float(rt['pre_close'])
else:
preClose = float(rt['price'])
rate = float(frow['factor']) / preClose
data = data[(data.date >= start) & (data.date <= end)]
for label in ['open', 'high', 'low', 'close']:
data[label] = data[label] / rate
data[label] = data[label].map(ct.FORMAT)
data[label] = data[label].astype(float)
data = data.set_index('date')
data = data.sort_index(ascending = False)
return data
else:
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label] / data['factor']
if drop_factor:
data = data.drop('factor', axis=1)
data = data[(data.date>=start) & (data.date<=end)]
for label in ['open', 'high', 'close', 'low']:
data[label] = data[label].map(ct.FORMAT)
data = data.set_index('date')
data = data.sort_index(ascending = False)
data = data.astype(float)
return data
def _parase_fq_factor(code, start, end):
symbol = _code_to_symbol(code)
request = Request(ct.HIST_FQ_FACTOR_URL%(ct.P_TYPE['http'],
ct.DOMAINS['vsf'], symbol))
text = urlopen(request, timeout=10).read()
text = text[1:len(text)-1]
text = text.decode('utf-8') if ct.PY3 else text
text = text.replace('{_', '{"')
text = text.replace('total', '"total"')
text = text.replace('data', '"data"')
text = text.replace(':"', '":"')
text = text.replace('",_', '","')
text = text.replace('_', '-')
text = json.loads(text)
df = pd.DataFrame({'date':list(text['data'].keys()), 'factor':list(text['data'].values())})
df['date'] = df['date'].map(_fun_except) # for null case
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
df['factor'] = df['factor'].astype(float)
return df
def _fun_except(x):
if len(x) > 10:
return x[-10:]
else:
return x
def _parse_fq_data(url, index, retry_count, pause):
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(url)
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
html = lxml.html.parse(StringIO(text))
res = html.xpath('//table[@id=\"FundHoldSharesTable\"]')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr)
df = pd.read_html(sarr, skiprows = [0, 1])[0]
if len(df) == 0:
return pd.DataFrame()
if index:
df.columns = ct.HIST_FQ_COLS[0:7]
else:
df.columns = ct.HIST_FQ_COLS
if df['date'].dtypes == np.object:
df['date'] = df['date'].astype(np.datetime64)
df = df.drop_duplicates('date')
except Exception as e:
print(e)
else:
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG)
def get_index():
"""
    Get quotes for the major market indices.
    return
    -------
      DataFrame
          code: index code
          name: index name
          change: pct change
          open: opening price
          preclose: previous closing price
          close: closing price
          high: highest price
          low: lowest price
          volume: traded volume (lots)
          amount: traded amount (hundred million CNY)
"""
request = Request(ct.INDEX_HQ_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sinahq']))
text = urlopen(request, timeout=10).read()
text = text.decode('GBK')
text = text.replace('var hq_str_sh', '').replace('var hq_str_sz', '')
text = text.replace('";', '').replace('"', '').replace('=', ',')
text = '%s%s'%(ct.INDEX_HEADER, text)
df = pd.read_csv(StringIO(text), sep=',', thousands=',')
df['change'] = (df['close'] / df['preclose'] - 1 ) * 100
df['amount'] = df['amount'] / 100000000
df['change'] = df['change'].map(ct.FORMAT)
df['amount'] = df['amount'].map(ct.FORMAT)
df = df[ct.INDEX_COLS]
df['code'] = df['code'].map(lambda x:str(x).zfill(6))
df['change'] = df['change'].astype(float)
df['amount'] = df['amount'].astype(float)
return df
def _get_index_url(index, code, qt):
if index:
url = ct.HIST_INDEX_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
else:
url = ct.HIST_FQ_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
code, qt[0], qt[1])
return url
def get_hists(symbols, start=None, end=None,
ktype='D', retry_count=3,
pause=0.001):
"""
    Get historical quotes for a batch of symbols; see get_hist_data for the parameters and the returned data type.
"""
df = pd.DataFrame()
if isinstance(symbols, list) or isinstance(symbols, set) or isinstance(symbols, tuple) or isinstance(symbols, pd.Series):
for symbol in symbols:
data = get_hist_data(symbol, start=start, end=end,
ktype=ktype, retry_count=retry_count,
pause=pause)
data['code'] = symbol
df = df.append(data, ignore_index=True)
return df
else:
return None
def _random(n=13):
from random import randint
start = 10**(n-1)
end = (10**n)-1
return str(randint(start, end))
def _code_to_symbol(code):
"""
    Generate the exchange-prefixed symbol (sh/sz) for a stock code.
"""
if code in ct.INDEX_LABELS:
return ct.INDEX_LIST[code]
else:
if len(code) != 6 :
return ''
else:
return 'sh%s'%code if code[:1] in ['5', '6', '9'] else 'sz%s'%code
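# Examples of the mapping performed by _code_to_symbol, following the prefix
# rule above: '600848' -> 'sh600848', '510050' -> 'sh510050',
# '000001' -> 'sz000001'; anything that is not exactly 6 characters returns ''.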
| bsd-3-clause |
jmausolf/Python_Tutorials | PostgreSQL_with_Python/prepare.py | 1 | 10406 | import sys
import os
import pandas as pd
import subprocess
import argparse
import pdb
import pickle
from setup import setup_environment
"""
Code to take top performing recent models and
put them in the evaluation webapp for further
examination.
Examples:
--------
python prepare.py '2016-08-03' 'auc'
python prepare.py '2016-08-03' 'recall@' -p '0.01'
python prepare.py '2016-08-03' 'precision@' -p '10.0' -n 10
python prepare.py '2016-08-03' 'precision@' -p '10.0' -n 10 -d 'example_directory/'
"""
engine = setup_environment.get_database()
try:
con = engine.raw_connection()
con.cursor().execute("SET SCHEMA '{}'".format('models'))
except:
pass
def get_metric_best_models(timestamp, metric, parameter=None, number=25):
"""
--------------------------------------------------------
Get the EVALUATION METRIC VALUE of the best models
by the specified timestamp and given metric
--------------------------------------------------------
ARGUMENTS:
timestamp: models run on or after given timestamp
example: '2016-08-03'
metric: metric to be optimized
example: 'precision@'
parameter: parameter value or threshold if any
default=None
example: '10.0'
number: maximum number of desired results
default = 25
--------------------------------------------------------
"""
if parameter is None:
query = (" SELECT value FROM results.evaluations JOIN results.models \
ON evaluations.model_id=models.model_id \
WHERE run_time >= '{}' \
AND value is not null \
AND metric = '{}' \
ORDER BY value DESC LIMIT {} ; ").format(timestamp, metric, number)
elif parameter is not None:
query = (" SELECT value FROM results.evaluations JOIN results.models \
ON evaluations.model_id=models.model_id \
WHERE run_time >= '{}' \
AND value is not null \
AND metric = '{}' \
AND parameter = '{}' \
ORDER BY value DESC LIMIT {} ; ").format(timestamp, metric, parameter, number)
df_models = pd.read_sql(query, con=con)
output = df_models["value"].apply(lambda x: str(x)).values
statement = "Resulting metric for models with best {} run on or after {}: \n".format(metric, timestamp)
print (statement, output)
return output
def get_best_models_id(timestamp, metric, parameter=None, number=25):
"""
--------------------------------------------------------
    Get the RUN TIME identifiers of the best models
by the specified timestamp and given metric
--------------------------------------------------------
ARGUMENTS:
timestamp: models run on or after given timestamp
example: '2016-08-03'
metric: metric to be optimized
example: 'precision@'
parameter: parameter value or threshold if any
default=None
example: '10.0'
number: maximum number of desired results
default = 25
--------------------------------------------------------
"""
if parameter is None:
query = (" SELECT run_time FROM results.evaluations JOIN results.models \
ON evaluations.model_id=models.model_id \
WHERE run_time >= '{}' \
AND value is not null \
AND metric = '{}' \
ORDER BY value DESC LIMIT {} ; ").format(timestamp, metric, number)
elif parameter is not None:
query = (" SELECT run_time FROM results.evaluations JOIN results.models \
ON evaluations.model_id=models.model_id \
WHERE run_time >= '{}' \
AND value is not null \
AND metric = '{}' \
AND parameter = '{}' \
ORDER BY value DESC LIMIT {} ; ").format(timestamp, metric, parameter, number)
df_models = pd.read_sql(query, con=con)
output = df_models['run_time'].apply(lambda x: str(x).replace(' ', 'T')).values
print(output)
return output
def get_best_models(timestamp, metric, parameter=None, number=25):
"""
--------------------------------------------------------
Get the REPORT of the best models
by the specified timestamp and given metric
RETURNS RUN TIME, MODEL TYPE, METRIC, and VALUE
OR
RUN TIME, MODEL TYPE, METRIC, PARAMETER, and VALUE
--------------------------------------------------------
ARGUMENTS:
timestamp: models run on or after given timestamp
example: '2016-08-03'
metric: metric to be optimized
example: 'precision@'
parameter: parameter value or threshold if any
default=None
example: '10.0'
number: maximum number of desired results
default = 25
--------------------------------------------------------
"""
if parameter is None:
query = (" SELECT run_time, model_type, metric, value FROM results.evaluations JOIN results.models \
ON evaluations.model_id=models.model_id \
WHERE run_time >= '{}' \
AND value is not null \
AND metric = '{}' \
ORDER BY value DESC LIMIT {} ; ").format(timestamp, metric, number)
elif parameter is not None:
query = (" SELECT run_time, model_type, metric, parameter, value FROM results.evaluations JOIN results.models \
ON evaluations.model_id=models.model_id \
WHERE run_time >= '{}' \
AND value is not null \
AND metric = '{}' \
AND parameter = '{}' \
ORDER BY value DESC LIMIT {} ; ").format(timestamp, metric, parameter, number)
df_models = pd.read_sql(query, con=con)
output = df_models
statement = "Resulting top models with best {} run on or after {}: \n".format(metric, timestamp)
print (statement, output)
return output
def get_pickle_best_models(timestamp, metric, parameter=None, number=25, directory="results/"):
"""
--------------------------------------------------------
Get the PICKLE FILE of the best models
by the specified timestamp and given metric
RETURNS the PICKLE FILE to a DIRECTORY
--------------------------------------------------------
ARGUMENTS:
timestamp: models run on or after given timestamp
example: '2016-08-03'
metric: metric to be optimized
example: 'precision@'
parameter: parameter value or threshold if any
default=None
example: '10.0'
number: maximum number of desired results
default = 25
--------------------------------------------------------
"""
if parameter is None:
query = ("SELECT pickle_blob, run_time FROM \
(SELECT evaluations.model_id, run_time \
FROM results.evaluations JOIN results.models \
ON evaluations.model_id=models.model_id \
WHERE run_time >= '{}' \
AND value is not null \
AND metric = '{}' \
ORDER BY value DESC LIMIT {}) \
AS top_models \
INNER JOIN results.data \
ON top_models.model_id=data.model_id ; " ).format(timestamp, metric, number)
elif parameter is not None:
query = ("SELECT pickle_blob, run_time FROM \
(SELECT evaluations.model_id, run_time \
FROM results.evaluations JOIN results.models \
ON evaluations.model_id=models.model_id \
WHERE run_time >= '{}' \
AND value is not null \
AND metric = '{}' \
AND parameter = '{}' \
ORDER BY value DESC LIMIT {}) \
AS top_models \
INNER JOIN results.data \
ON top_models.model_id=data.model_id ; " ).format(timestamp, metric, parameter, number)
df_models = pd.read_sql(query, con=con)
N = len(df_models['pickle_blob'])
for file_number in range(0, N):
pickle_file = pickle.loads(df_models['pickle_blob'].iloc[file_number])
file_name = df_models['run_time'].apply(lambda x: str(x).replace(' ', 'T')).iloc[file_number]
if parameter is None:
full_file_name = "police_eis_results_"+"top_"+metric+"any"+"_"+file_name+".pkl"
elif parameter is not None:
full_file_name = "police_eis_results_"+"top_"+metric+parameter+"_"+file_name+".pkl"
file_path = directory+full_file_name
pickle.dump(pickle_file, open( file_path, "wb" ) )
return None
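# Programmatic usage sketch (mirrors the CLI examples in the module docstring;
# timestamp, metric and parameter values are illustrative):
#
#   models = get_best_models('2016-08-03', 'precision@', parameter='10.0', number=10)
#   get_pickle_best_models('2016-08-03', 'precision@', parameter='10.0',
#                          number=10, directory='results/')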
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument("timestamp", type=str, help="show models more recent than a given timestamp")
parser.add_argument("metric", type=str, help="specify a desired metric to optimize")
parser.add_argument("-p", "--parameter", default=None, type=str, help="specify a desired parameter or threshold for your metric, default=None")
parser.add_argument("-n", "--number", default=25, type=int, help="maximum number of results to return, default=25")
parser.add_argument("-d", "--directory", default="results/", type=str, help="file directory for pickle files, default='results/'")
args = parser.parse_args()
print("[*] Updating model list...")
models = get_best_models(args.timestamp, args.metric, args.parameter, args.number)
print("[*] Dumping requested pickle files to results...")
pickles = get_pickle_best_models(args.timestamp, args.metric, args.parameter, args.number, args.directory)
print("[*] Done!")
| mit |
scott-maddox/simplepl | setup.py | 1 | 3692 | #
# Copyright (c) 2014, Scott J Maddox
#
# This file is part of SimplePL.
#
# SimplePL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SimplePL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with SimplePL.  If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# std lib imports
import sys
import os.path
# third party imports
import glob
from setuptools import setup, find_packages
# read in __version__
exec(open('src/simplepl/version.py').read())
# If on Mac OS X, build an app bundle using py2app
if sys.platform == 'darwin':
# extra arguments for mac py2app to associate files
plist = dict(
CFBundleName='SimplePL',
CFBundleShortVersionString=__version__,
CFBundleIdentifier='org.python.simplepl',
)
py2app_opts = dict(
argv_emulation=False,
includes=['PySide',
'PySide.QtCore',
'PySide.QtGui',
'pyqtgraph',
'scipy.interpolate',
'single_process',
'serial'],
excludes=['PySide.QtNetwork',
'wxpython',
'matplotlib',
'zmq',
'lib-dynload',
'numpy.linalg',
],
plist=plist,
iconfile='src/simplepl/resources/icon.icns',
)
extra_options = dict(
setup_requires=['py2app'],
app=['src/simplepl/main.py'],
options=dict(
py2app=py2app_opts
)
)
elif sys.platform == 'win32':
extra_options = dict(
setup_requires=['py2exe'],
windows = [
dict(script='with_gui.py',
icon_resources=[(1, 'src/simplepl/resources/icon.ico')])
]
)
else:
extra_options = dict(
# Normally unix-like platforms will use "setup.py install"
# and install the main script as such
scripts=['src/simplepl/main.py'],
)
setup(name='simplepl',
version=__version__, # read from version.py
description='a simple python gui for taking ' \
'photoluminescence (PL) spectra',
long_description=open('README.rst').read(),
url='http://scott-maddox.github.io/simplepl',
author='Scott J. Maddox',
author_email='smaddox@utexas.edu',
license='AGPLv3',
packages=['simplepl',
'simplepl.dialogs',
'simplepl.instruments',
'simplepl.instruments.drivers',
],
package_dir={'simplepl': 'src/simplepl'},
zip_safe=True,
**extra_options)
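# Typical build invocations implied by the options above (a sketch; exact
# packaging steps may vary by platform and py2app/py2exe version):
#
#   python setup.py py2app    # Mac OS X: build the .app bundle
#   python setup.py py2exe    # Windows: build the executable
#   python setup.py install   # other platforms: install main.py as a script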
| agpl-3.0 |
szredinger/graph-constr-group-testing | graph_constr_group_testing/results_analyser.py | 1 | 1187 | import collections
import csv
from graph_constr_group_testing.core import base_types
import pandas
def averageQueriesForSize(results):
result = []
count = collections.defaultdict(int)
sumallqueries = collections.defaultdict(int)
for solver, problem, statistics in results:
n = base_types.size_of_problem(problem)
count[n] += 1
sumallqueries[n] += statistics.get_var('all')
for k, v in sumallqueries.iteritems():
result.append((k, float(v)/count[k]))
return zip(*sorted(result, key=lambda x: x[0]))
class CsvStats(base_types.ExperimentStatistics):
def __init__(self, csvFileName, renderers=None):
super(CsvStats, self).__init__(renderers)
self._fileName = csvFileName
def process(self):
with open(self._fileName, 'wc') as csvFile:
writer = csv.DictWriter(csvFile, list(self.headers))
writer.writeheader()
writer.writerows(self.results)
class PandasStats(base_types.ExperimentStatistics):
def __init__(self, renderers=None):
super(PandasStats, self).__init__(renderers)
def get_dataframe(self):
return pandas.DataFrame(self.results)
| mit |
pythonvietnam/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# copy the labels and mark the unlabeled points with -1
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
HesselTjeerdsma/Cyber-Physical-Pacman-Game | Algor/flask/lib/python2.7/site-packages/scipy/integrate/quadrature.py | 20 | 28269 | from __future__ import division, print_function, absolute_import
import numpy as np
import math
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
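# Example (a sketch of typical use; the integrand is illustrative):
#   from scipy import integrate
#   val, _ = integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)   # val is approximately 1.0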
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
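# Example (sketch): adaptive Gaussian quadrature of cos on [0, pi/2]
#   from scipy import integrate
#   val, err = integrate.quadrature(np.cos, 0.0, np.pi/2)
#   # val is approximately 1.0; err is the difference between the last two estimates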
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
'avg' : Average two results:1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
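# Example (sketch): Simpson's rule is exact for cubics on an equally spaced,
# odd-length sample grid, e.g. integrating y = x**3 over [0, 1]:
#   x = np.linspace(0, 1, 11)
#   simps(x**3, x)   # 0.25, the exact value of the integral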
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in xrange(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in xrange(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="")
for i in xrange(k+1):
for j in xrange(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
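# Example (sketch): romb requires 2**k + 1 equally spaced samples.
#   x = np.arange(9)   # 9 = 2**3 + 1 samples, spacing dx = 1
#   romb(x**2)         # approximately 170.6667 (= 512/3, the exact integral of x**2 over [0, 8])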
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <ransom@cfa.harvard.edu>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in xrange(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in xrange(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]`
and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
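# Usage sketch (illustrative only, not part of the original module): apply a
# single N = 4 (Boole) rule to sin(x) on [0, pi], whose exact integral is 2.
#
#     N = 4
#     x = np.linspace(0, np.pi, N + 1)
#     an, B = newton_cotes(N, equal=1)
#     dx = np.pi / N
#     quad = dx * np.sum(an * np.sin(x))   # ~1.9986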
| apache-2.0 |
deepesch/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
mxjl620/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number
of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
DillonNovak/Programming-for-Chemical-Engineering-Applications | Breast+Cancer+Diagnosis.py | 1 | 5376 |
# coding: utf-8
# ## Predicting Malignant Tumors
# ### Wisconsin Diagnostic Breast Cancer Dataset
# https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)
#
# Dataset attributes:
#
# 0. diagnosis (malignant or benign)
#
# 1. radius (mean of distances from center to points on the perimeter)
# 2. texture (standard deviation of gray-scale values)
# 3. perimeter
# 4. area
# 5. smoothness (local variation in radius lengths)
# 6. compactness (perimeter^2 / area - 1.0)
# 7. concavity (severity of concave portions of the contour)
# 8. concave points (number of concave portions of the contour)
# 9. symmetry
# 10. fractal dimension ("coastline approximation" - 1)
# In[20]:
get_ipython().magic('matplotlib inline')
from sklearn.decomposition import PCA
import sys
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sklearn as sk
import seaborn as sns
sns.set_context('talk')
# In[21]:
#import PCA models
from pandas.tools.plotting import scatter_matrix
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# In[22]:
# load dataset
dfall = pd.read_csv('wdbc.data.txt')
# drop standard error and largest value for each attribute
df = dfall.drop(dfall.columns[[0,3,4,6,7,9,10,12,13,15,16,18,19,21,22,24,25,27,28,30,31]],axis=1)
# name columns
df.columns = ['diagnosis','radius','texture','perimeter','area','smoothness','compactness','concavity','concave points','symmetry','fractal dimension']
# In[23]:
print(df.shape)
print(df.describe())
# In[24]:
print(df.groupby('diagnosis').size())
df.head()
# In[25]:
scatter_matrix(df)
plt.show()
# In[26]:
X = df.ix[:,1:11]
X.tail()
# In[30]:
# plot histogram distribution of each attribute
plt.figure(figsize=(16,6))
plt.subplot(2,5,1)
k = 1
for c in X.columns:
plt.subplot(2,5,k)
plt.hist(X[c],normed=True,alpha=0.6,bins=20)
plt.title(c)
k += 1
plt.tight_layout()
# ### Scaling and Centering
# In[32]:
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(X)
lbls = X.columns
plt.figure(figsize=(16,6))
plt.subplot(2,5,1)
k = 0
for c in lbls:
plt.subplot(2,5,k+1)
plt.hist(X_std[:,k],normed=True,alpha=0.6,bins=20)
plt.title(c)
k += 1
plt.tight_layout()
# ### PCA Analysis
# In[33]:
pca = PCA(n_components=3)
Y = pca.fit_transform(X_std)
# In[34]:
w = pca.components_
v = pca.explained_variance_ratio_
print(v)
for k in range(0,len(w)):
plt.subplot(3,1,k+1)
plt.bar(range(0,len(w[k])),w[k],width=.5)
plt.xticks(range(0,len(w[k])),lbls)
plt.title('explained variance ratio = {0:.3f}'.format(v[k]))
plt.tight_layout()
# In[35]:
k = 0
for n in df['diagnosis']:
if(df.ix[k,0] == 'M'):
plt.scatter(Y[k,0],Y[k,1],color='red',alpha=0.4)
else:
plt.scatter(Y[k,0],Y[k,1],color='green',alpha=0.4)
k += 1
# ### Predictive Analysis
# Train predictive models to identify malignant tumors and choose the most accurate model to test on a validation set of data
# In[43]:
# split out validation dataset
diagnosisarray = df.values
dfvals = df.drop(df.columns[[0]],axis=1)
#print(dfvals.head())
array = dfvals.values
X = array[:,0:9]
Y = diagnosisarray[:,0]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = model_selection.train_test_split(X, Y, test_size=validation_size, random_state=seed)
# In[44]:
#check algorithms
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
#evaluate each model
results = []
names = []
for name, model in models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# In[45]:
# compare algorithms
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
# LDA is the most accurate.
# Use LDA model to evaluate the validation dataset
# In[16]:
# make predictions on validation dataset
lda = LinearDiscriminantAnalysis()
lda.fit(X_train, Y_train)
predictions = lda.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# In[47]:
# make predictions on validation dataset
lr = LogisticRegression()
lr.fit(X_train, Y_train)
predictions = lr.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
| gpl-3.0 |
Nyker510/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # slightly above 0.5 to avoid division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
bhermanmit/openmc | openmc/mgxs/mdgxs.py | 1 | 116899 | from __future__ import division
from collections import Iterable, OrderedDict
import itertools
from numbers import Integral
import warnings
import os
import sys
import copy
from abc import ABCMeta
from six import add_metaclass, string_types
import numpy as np
import openmc
from openmc.mgxs import MGXS
from openmc.mgxs.mgxs import _DOMAIN_TO_FILTER
import openmc.checkvalue as cv
# Supported cross section types
MDGXS_TYPES = ['delayed-nu-fission',
'chi-delayed',
'beta',
'decay-rate',
'delayed-nu-fission matrix']
# Maximum number of delayed groups, from src/constants.F90
MAX_DELAYED_GROUPS = 8
@add_metaclass(ABCMeta)
class MDGXS(MGXS):
"""An abstract multi-delayed-group cross section for some energy and delayed
group structures within some spatial domain.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group and multi-delayed-group cross sections for downstream
neutronics calculations.
NOTE: Users should instantiate the subclasses of this abstract class.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.Mesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'chi-delayed', 'beta', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : Material or Cell or Universe or Mesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell', and 'universe'
domain types. This is equal to the number of cell instances for
'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file) and the number of mesh cells for
'mesh' domain types.
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U-238', 'O-16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, energy_groups=None,
delayed_groups=None, by_nuclide=False, name='',
num_polar=1, num_azimuthal=1):
super(MDGXS, self).__init__(domain, domain_type, energy_groups,
by_nuclide, name, num_polar, num_azimuthal)
self._delayed_groups = None
if delayed_groups is not None:
self.delayed_groups = delayed_groups
def __deepcopy__(self, memo):
existing = memo.get(id(self))
# If this is the first time we have tried to copy this object, copy it
if existing is None:
clone = type(self).__new__(type(self))
clone._name = self.name
clone._rxn_type = self.rxn_type
clone._by_nuclide = self.by_nuclide
clone._nuclides = copy.deepcopy(self._nuclides)
clone._domain = self.domain
clone._domain_type = self.domain_type
clone._energy_groups = copy.deepcopy(self.energy_groups, memo)
clone._delayed_groups = copy.deepcopy(self.delayed_groups, memo)
clone._num_polar = self.num_polar
clone._num_azimuthal = self.num_azimuthal
clone._tally_trigger = copy.deepcopy(self.tally_trigger, memo)
clone._rxn_rate_tally = copy.deepcopy(self._rxn_rate_tally, memo)
clone._xs_tally = copy.deepcopy(self._xs_tally, memo)
clone._sparse = self.sparse
clone._derived = self.derived
clone._tallies = OrderedDict()
for tally_type, tally in self.tallies.items():
clone.tallies[tally_type] = copy.deepcopy(tally, memo)
memo[id(self)] = clone
return clone
# If this object has been copied before, return the first copy made
else:
return existing
@property
def _dont_squeeze(self):
"""Create a tuple of axes which should not be removed during the get_xs
process
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
return (0, 1, 3, 4)
else:
return (1, 2)
@property
def delayed_groups(self):
return self._delayed_groups
@property
def num_delayed_groups(self):
if self.delayed_groups is None:
return 1
else:
return len(self.delayed_groups)
@delayed_groups.setter
def delayed_groups(self, delayed_groups):
if delayed_groups is not None:
cv.check_type('delayed groups', delayed_groups, list, int)
cv.check_greater_than('num delayed groups', len(delayed_groups), 0)
# Check that the groups are within [1, MAX_DELAYED_GROUPS]
for group in delayed_groups:
cv.check_greater_than('delayed group', group, 0)
cv.check_less_than('delayed group', group, MAX_DELAYED_GROUPS,
equality=True)
self._delayed_groups = delayed_groups
@property
def filters(self):
# Create the non-domain specific Filters for the Tallies
group_edges = self.energy_groups.group_edges
energy_filter = openmc.EnergyFilter(group_edges)
if self.delayed_groups is not None:
delayed_filter = openmc.DelayedGroupFilter(self.delayed_groups)
filters = [[energy_filter], [delayed_filter, energy_filter]]
else:
filters = [[energy_filter], [energy_filter]]
return self._add_angle_filters(filters)
@staticmethod
def get_mgxs(mdgxs_type, domain=None, domain_type=None, energy_groups=None,
delayed_groups=None, by_nuclide=False, name='',
num_polar=1, num_azimuthal=1):
"""Return a MDGXS subclass object for some energy group structure within
some spatial domain for some reaction type.
This is a factory method which can be used to quickly create MDGXS
subclass objects for various reaction types.
Parameters
----------
mdgxs_type : {'delayed-nu-fission', 'chi-delayed', 'beta', 'decay-rate', 'delayed-nu-fission matrix'}
The type of multi-delayed-group cross section object to return
domain : openmc.Material or openmc.Cell or openmc.Universe or
openmc.Mesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain.
Defaults to False
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file. Defaults to the empty string.
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Returns
-------
openmc.mgxs.MDGXS
A subclass of the abstract MDGXS class for the multi-delayed-group
cross section type requested by the user
"""
cv.check_value('mdgxs_type', mdgxs_type, MDGXS_TYPES)
if mdgxs_type == 'delayed-nu-fission':
mdgxs = DelayedNuFissionXS(domain, domain_type, energy_groups,
delayed_groups)
elif mdgxs_type == 'chi-delayed':
mdgxs = ChiDelayed(domain, domain_type, energy_groups,
delayed_groups)
elif mdgxs_type == 'beta':
mdgxs = Beta(domain, domain_type, energy_groups, delayed_groups)
elif mdgxs_type == 'decay-rate':
mdgxs = DecayRate(domain, domain_type, energy_groups, delayed_groups)
elif mdgxs_type == 'delayed-nu-fission matrix':
mdgxs = DelayedNuFissionMatrixXS(domain, domain_type, energy_groups,
delayed_groups)
mdgxs.by_nuclide = by_nuclide
mdgxs.name = name
mdgxs.num_polar = num_polar
mdgxs.num_azimuthal = num_azimuthal
return mdgxs
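# Illustrative sketch (not part of the original source). ``fuel_cell`` is a
# hypothetical openmc.Cell; a two-group, six-delayed-group ChiDelayed MDGXS
# could be requested from the factory as:
#
#     groups = openmc.mgxs.EnergyGroups([0., 0.625, 2.0e7])
#     chi_d = MDGXS.get_mgxs('chi-delayed', domain=fuel_cell,
#                            domain_type='cell', energy_groups=groups,
#                            delayed_groups=list(range(1, 7)))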
def get_xs(self, groups='all', subdomains='all', nuclides='all',
xs_type='macro', order_groups='increasing',
value='mean', delayed_groups='all', squeeze=True, **kwargs):
"""Returns an array of multi-delayed-group cross sections.
This method constructs a 4D NumPy array for the requested
multi-delayed-group cross section data for one or more
subdomains (1st dimension), delayed groups (2nd dimension),
energy groups (3rd dimension), and nuclides (4th dimension).
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U-235', 'U-238']). The
special string 'all' will return the cross sections for all nuclides
in the spatial domain. The special string 'sum' will return the
cross section summed over all nuclides. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
delayed_groups : list of int or 'all'
Delayed groups of interest. Defaults to 'all'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the multi-group cross section indexed in the order
each group, subdomain and nuclide is listed in the parameters.
Raises
------
ValueError
When this method is called before the multi-delayed-group cross
section is computed from tally data.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# FIXME: Unable to get microscopic xs for mesh domain because the mesh
# cells do not know the nuclide densities in each mesh cell.
if self.domain_type == 'mesh' and xs_type == 'micro':
msg = 'Unable to get micro xs for mesh domain since the mesh ' \
'cells do not know the nuclide densities in each mesh cell.'
raise ValueError(msg)
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, string_types):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
for subdomain in subdomains:
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
filter_bins.append((subdomain,))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(groups, string_types):
cv.check_iterable_type('groups', groups, Integral)
for group in groups:
filters.append(openmc.EnergyFilter)
filter_bins.append(
(self.energy_groups.get_group_bounds(group),))
# Construct list of delayed group tuples for all requested groups
if not isinstance(delayed_groups, string_types):
cv.check_type('delayed groups', delayed_groups, list, int)
for delayed_group in delayed_groups:
filters.append(openmc.DelayedGroupFilter)
filter_bins.append((delayed_group,))
# Construct a collection of the nuclides to retrieve from the xs tally
if self.by_nuclide:
if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:
query_nuclides = self.get_nuclides()
else:
query_nuclides = nuclides
else:
query_nuclides = ['total']
# If user requested the sum for all nuclides, use tally summation
if nuclides == 'sum' or nuclides == ['sum']:
xs_tally = self.xs_tally.summation(nuclides=query_nuclides)
xs = xs_tally.get_values(filters=filters,
filter_bins=filter_bins, value=value)
else:
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins,
nuclides=query_nuclides, value=value)
# Divide by atom number densities for microscopic cross sections
if xs_type == 'micro':
if self.by_nuclide:
densities = self.get_nuclide_densities(nuclides)
else:
densities = self.get_nuclide_densities('sum')
if value == 'mean' or value == 'std_dev':
xs /= densities[np.newaxis, :, np.newaxis]
# Eliminate the trivial score dimension
xs = np.squeeze(xs, axis=len(xs.shape) - 1)
xs = np.nan_to_num(xs)
if groups == 'all':
num_groups = self.num_groups
else:
num_groups = len(groups)
if delayed_groups == 'all':
num_delayed_groups = self.num_delayed_groups
else:
num_delayed_groups = len(delayed_groups)
# Reshape tally data array with separate axes for domain,
# energy groups, delayed groups, and nuclides
# Accommodate the polar and azimuthal bins if needed
num_subdomains = \
int(xs.shape[0] / (num_groups * num_delayed_groups *
self.num_polar * self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_delayed_groups, num_groups)
else:
new_shape = (num_subdomains, num_delayed_groups, num_groups)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[..., ::-1, :]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# delayed group, and energy group data.
xs = self._squeeze_xs(xs)
return xs
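# Illustrative note (not part of the original source): once tally data has
# been loaded from a statepoint, the unsqueezed array returned by
#
#     xs = mdgxs.get_xs(squeeze=False)
#
# has shape (num_subdomains, num_delayed_groups, num_groups, num_nuclides)
# for a tally without polar/azimuthal discretization.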
def get_slice(self, nuclides=[], groups=[], delayed_groups=[]):
"""Build a sliced MDGXS for the specified nuclides, energy groups,
and delayed groups.
This method constructs a new MDGXS to encapsulate a subset of the data
represented by this MDGXS. The subset of data to include in the tally
slice is determined by the nuclides, energy groups, delayed groups
specified in the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U-235', 'U-238']; default is [])
groups : list of int
A list of energy group indices starting at 1 for the high energies
(e.g., [1, 2, 3]; default is [])
delayed_groups : list of int
A list of delayed group indices
(e.g., [1, 2, 3]; default is [])
Returns
-------
openmc.mgxs.MDGXS
A new MDGXS object which encapsulates the subset of data requested
for the nuclide(s) and/or energy group(s) and/or delayed group(s)
requested in the parameters.
"""
cv.check_iterable_type('nuclides', nuclides, string_types)
cv.check_iterable_type('energy_groups', groups, Integral)
cv.check_type('delayed groups', delayed_groups, list, int)
# Build lists of filters and filter bins to slice
filters = []
filter_bins = []
if len(groups) != 0:
energy_bins = []
for group in groups:
group_bounds = self.energy_groups.get_group_bounds(group)
energy_bins.append(group_bounds)
filter_bins.append(tuple(energy_bins))
filters.append(openmc.EnergyFilter)
if len(delayed_groups) != 0:
filter_bins.append(tuple(delayed_groups))
filters.append(openmc.DelayedGroupFilter)
# Clone this MGXS to initialize the sliced version
slice_xs = copy.deepcopy(self)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice each of the tallies across nuclides and energy groups
for tally_type, tally in slice_xs.tallies.items():
slice_nuclides = [nuc for nuc in nuclides if nuc in tally.nuclides]
if filters != []:
tally_slice = tally.get_slice(filters=filters,
filter_bins=filter_bins,
nuclides=slice_nuclides)
else:
tally_slice = tally.get_slice(nuclides=slice_nuclides)
slice_xs.tallies[tally_type] = tally_slice
# Assign sliced energy group structure to sliced MDGXS
if groups:
new_group_edges = []
for group in groups:
group_edges = self.energy_groups.get_group_bounds(group)
new_group_edges.extend(group_edges)
new_group_edges = np.unique(new_group_edges)
slice_xs.energy_groups.group_edges = sorted(new_group_edges)
# Assign sliced delayed group structure to sliced MDGXS
if delayed_groups:
slice_xs.delayed_groups = delayed_groups
# Assign sliced nuclides to sliced MGXS
if nuclides:
slice_xs.nuclides = nuclides
slice_xs.sparse = self.sparse
return slice_xs
def merge(self, other):
"""Merge another MGXS with this one
MGXS are only mergeable if their energy groups and nuclides are either
identical or mutually exclusive. If results have been loaded from a
statepoint, then MGXS are only mergeable along one and only one of
energy groups or nuclides.
Parameters
----------
other : openmc.mgxs.MDGXS
MDGXS to merge with this one
Returns
-------
merged_mdgxs : openmc.mgxs.MDGXS
Merged MDGXS
"""
merged_mdgxs = super(MDGXS, self).merge(other)
# Merge delayed groups
if self.delayed_groups != other.delayed_groups:
merged_mdgxs.delayed_groups = list(set(self.delayed_groups +
other.delayed_groups))
return merged_mdgxs
def print_xs(self, subdomains='all', nuclides='all', xs_type='macro'):
"""Print a string representation for the multi-group cross section.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U-235', 'U-238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will report
the cross sections summed over all nuclides. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
"""
if self.delayed_groups is None:
super(MDGXS, self).print_xs(subdomains, nuclides, xs_type)
return
# Construct a collection of the subdomains to report
if not isinstance(subdomains, string_types):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
subdomains = np.arange(self.num_subdomains, dtype=np.int)
elif self.domain_type == 'mesh':
xyz = [range(1, x + 1) for x in self.domain.dimension]
subdomains = list(itertools.product(*xyz))
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
elif nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, string_types)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Build header for string with type and domain info
string = 'Multi-Delayed-Group XS\n'
string += '{0: <16}=\t{1}\n'.format('\tReaction Type', self.rxn_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain Type', self.domain_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain ID', self.domain.id)
# Generate the header for an individual XS
xs_header = '\tCross Sections [{0}]:'.format(self.get_units(xs_type))
# If cross section data has not been computed, only print string header
if self.tallies is None:
print(string)
return
# Set polar/azimuthal bins
if self.num_polar > 1 or self.num_azimuthal > 1:
polar_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azimuthal_bins = np.linspace(-np.pi, np.pi,
num=self.num_azimuthal + 1,
endpoint=True)
# Loop over all subdomains
for subdomain in subdomains:
if self.domain_type == 'distribcell' or self.domain_type == 'mesh':
string += '{0: <16}=\t{1}\n'.format('\tSubdomain', subdomain)
# Loop over all Nuclides
for nuclide in nuclides:
# Build header for nuclide type
if nuclide != 'sum':
string += '{0: <16}=\t{1}\n'.format('\tNuclide', nuclide)
# Add the cross section header
string += '{0: <16}\n'.format(xs_header)
for delayed_group in self.delayed_groups:
template = '{0: <12}Delayed Group {1}:\t'
string += template.format('', delayed_group)
string += '\n'
template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]:\t'
average_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='mean',
delayed_groups=[delayed_group])
rel_err_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='rel_err',
delayed_groups=[delayed_group])
rel_err_xs = rel_err_xs * 100.
if self.num_polar > 1 or self.num_azimuthal > 1:
# Loop over polar, azimuthal, and energy group ranges
for pol in range(len(polar_bins) - 1):
pol_low, pol_high = polar_bins[pol: pol + 2]
for azi in range(len(azimuthal_bins) - 1):
azi_low, azi_high = azimuthal_bins[azi: azi + 2]
string += '\t\tPolar Angle: [{0:5f} - {1:5f}]'.format(
pol_low, pol_high) + \
'\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(
azi_low, azi_high) + '\n'
for group in range(1, self.num_groups + 1):
bounds = \
self.energy_groups.get_group_bounds(group)
string += '\t' + template.format('', group,
bounds[0],
bounds[1])
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[pol, azi, group - 1],
rel_err_xs[pol, azi, group - 1])
string += '\n'
string += '\n'
else:
# Loop over energy groups ranges
for group in range(1, self.num_groups+1):
bounds = self.energy_groups.get_group_bounds(group)
string += template.format('', group, bounds[0], bounds[1])
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[group - 1], rel_err_xs[group - 1])
string += '\n'
string += '\n'
string += '\n'
print(string)
def export_xs_data(self, filename='mgxs', directory='mgxs',
format='csv', groups='all', xs_type='macro',
delayed_groups='all'):
"""Export the multi-delayed-group cross section data to a file.
This method leverages the functionality in the Pandas library to export
the multi-group cross section data in a variety of output file formats
for storage and/or post-processing.
Parameters
----------
filename : str
Filename for the exported file. Defaults to 'mgxs'.
directory : str
Directory for the exported file. Defaults to 'mgxs'.
format : {'csv', 'excel', 'pickle', 'latex'}
The format for the exported data file. Defaults to 'csv'.
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
xs_type: {'macro', 'micro'}
Store the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
delayed_groups : list of int or 'all'
Delayed groups of interest. Defaults to 'all'.
"""
cv.check_type('filename', filename, string_types)
cv.check_type('directory', directory, string_types)
cv.check_value('format', format, ['csv', 'excel', 'pickle', 'latex'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
filename = os.path.join(directory, filename)
filename = filename.replace(' ', '-')
# Get a Pandas DataFrame for the data
df = self.get_pandas_dataframe(groups=groups, xs_type=xs_type,
delayed_groups=delayed_groups)
# Export the data using Pandas IO API
if format == 'csv':
df.to_csv(filename + '.csv', index=False)
elif format == 'excel':
if self.domain_type == 'mesh':
df.to_excel(filename + '.xls')
else:
df.to_excel(filename + '.xls', index=False)
elif format == 'pickle':
df.to_pickle(filename + '.pkl')
elif format == 'latex':
if self.domain_type == 'distribcell':
msg = 'Unable to export distribcell multi-group cross section' \
'data to a LaTeX table'
raise NotImplementedError(msg)
df.to_latex(filename + '.tex', bold_rows=True,
longtable=True, index=False)
# Surround LaTeX table with code needed to run pdflatex
with open(filename + '.tex','r') as original:
data = original.read()
with open(filename + '.tex','w') as modified:
modified.write(
'\\documentclass[preview, 12pt, border=1mm]{standalone}\n')
modified.write('\\usepackage{caption}\n')
modified.write('\\usepackage{longtable}\n')
modified.write('\\usepackage{booktabs}\n')
modified.write('\\begin{document}\n\n')
modified.write(data)
modified.write('\n\\end{document}')
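# Illustrative usage sketch (not part of the original source); the file and
# directory names are made up:
#
#     mdgxs.export_xs_data(filename='chi-delayed', directory='mgxs',
#                          format='csv', delayed_groups='all')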
def get_pandas_dataframe(self, groups='all', nuclides='all',
xs_type='macro', paths=True,
delayed_groups='all'):
"""Build a Pandas DataFrame for the MDGXS data.
This method leverages :meth:`openmc.Tally.get_pandas_dataframe`, but
renames the columns with terminology appropriate for cross section data.
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the dataframe. This
may be a list of nuclide name strings (e.g., ['U-235', 'U-238']).
The special string 'all' will include the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
include the cross sections summed over all nuclides. Defaults
to 'all'.
xs_type: {'macro', 'micro'}
Return macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
paths : bool, optional
Construct columns for distribcell tally filters (default is True).
The geometric information in the Summary object is embedded into
a Multi-index column with a geometric "path" to each distribcell
instance.
delayed_groups : list of int or 'all'
Delayed groups of interest. Defaults to 'all'.
Returns
-------
pandas.DataFrame
A Pandas DataFrame for the cross section data.
Raises
------
ValueError
When this method is called before the multi-delayed-group cross
section is computed from tally data.
"""
if not isinstance(groups, string_types):
cv.check_iterable_type('groups', groups, Integral)
if nuclides != 'all' and nuclides != 'sum':
cv.check_iterable_type('nuclides', nuclides, string_types)
if not isinstance(delayed_groups, string_types):
cv.check_type('delayed groups', delayed_groups, list, int)
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Get a Pandas DataFrame from the derived xs tally
if self.by_nuclide and nuclides == 'sum':
# Use tally summation to sum across all nuclides
xs_tally = self.xs_tally.summation(nuclides=self.get_nuclides())
df = xs_tally.get_pandas_dataframe(paths=paths)
# Remove nuclide column since it is homogeneous and redundant
if self.domain_type == 'mesh':
df.drop('sum(nuclide)', axis=1, level=0, inplace=True)
else:
df.drop('sum(nuclide)', axis=1, inplace=True)
# If the user requested a specific set of nuclides
elif self.by_nuclide and nuclides != 'all':
xs_tally = self.xs_tally.get_slice(nuclides=nuclides)
df = xs_tally.get_pandas_dataframe(paths=paths)
# If the user requested all nuclides, keep nuclide column in dataframe
else:
df = self.xs_tally.get_pandas_dataframe(paths=paths)
# Remove the score column since it is homogeneous and redundant
if self.domain_type == 'mesh':
df = df.drop('score', axis=1, level=0)
else:
df = df.drop('score', axis=1)
# Convert azimuthal, polar, energy in and energy out bin values in to
# bin indices
columns = self._df_convert_columns_to_bins(df)
# Select out those groups the user requested
if not isinstance(groups, string_types):
if 'group in' in df:
df = df[df['group in'].isin(groups)]
if 'group out' in df:
df = df[df['group out'].isin(groups)]
# If user requested micro cross sections, divide out the atom densities
if xs_type == 'micro':
if self.by_nuclide:
densities = self.get_nuclide_densities(nuclides)
else:
densities = self.get_nuclide_densities('sum')
densities = np.repeat(densities, len(self.rxn_rate_tally.scores))
tile_factor = int(df.shape[0] / len(densities))
df['mean'] /= np.tile(densities, tile_factor)
df['std. dev.'] /= np.tile(densities, tile_factor)
# Sort the dataframe by domain type id (e.g., distribcell id) and
# energy groups such that data is from fast to thermal
if self.domain_type == 'mesh':
mesh_str = 'mesh {0}'.format(self.domain.id)
df.sort_values(by=[(mesh_str, 'x'), (mesh_str, 'y'),
(mesh_str, 'z')] + columns, inplace=True)
else:
df.sort_values(by=[self.domain_type] + columns, inplace=True)
return df
class ChiDelayed(MDGXS):
r"""The delayed fission spectrum.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group and multi-delayed-group cross sections for multi-group
neutronics calculations. At a minimum, one needs to set the
:attr:`ChiDelayed.energy_groups` and :attr:`ChiDelayed.domain` properties.
Tallies for the flux and appropriate reaction rates over the specified
domain are generated automatically via the :attr:`ChiDelayed.tallies`
property, which can then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross
section can then be obtained from the :attr:`ChiDelayed.xs_tally` property.
For a spatial domain :math:`V`, energy group :math:`[E_g,E_{g-1}]`, and
delayed group :math:`d`, the delayed fission spectrum is calculated as:
.. math::
\langle \nu^d \sigma_{f,g' \rightarrow g} \phi \rangle &= \int_{r \in V}
dr \int_{4\pi} d\Omega' \int_0^\infty dE' \int_{E_g}^{E_{g-1}} dE \;
\chi(E) \nu^d \sigma_f (r, E') \psi(r, E', \Omega')\\
\langle \nu^d \sigma_f \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
d\Omega' \int_0^\infty dE' \int_0^\infty dE \; \chi(E) \nu^d \sigma_f (r,
E') \psi(r, E', \Omega') \\
\chi_g^d &= \frac{\langle \nu^d \sigma_{f,g' \rightarrow g} \phi \rangle}
{\langle \nu^d \sigma_f \phi \rangle}
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.Mesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : Material or Cell or Universe or Mesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`ChiDelayed.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U-238', 'O-16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, energy_groups=None,
delayed_groups=None, by_nuclide=False, name='',
num_polar=1, num_azimuthal=1):
super(ChiDelayed, self).__init__(domain, domain_type, energy_groups,
delayed_groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = 'chi-delayed'
self._estimator = 'analog'
@property
def scores(self):
return ['delayed-nu-fission', 'delayed-nu-fission']
@property
def filters(self):
# Create the non-domain specific Filters for the Tallies
group_edges = self.energy_groups.group_edges
energyout = openmc.EnergyoutFilter(group_edges)
energyin = openmc.EnergyFilter([group_edges[0], group_edges[-1]])
if self.delayed_groups is not None:
delayed_filter = openmc.DelayedGroupFilter(self.delayed_groups)
filters = [[delayed_filter, energyin], [delayed_filter, energyout]]
else:
filters = [[energyin], [energyout]]
return self._add_angle_filters(filters)
@property
def tally_keys(self):
return ['delayed-nu-fission-in', 'delayed-nu-fission-out']
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
self._rxn_rate_tally = self.tallies['delayed-nu-fission-out']
self._rxn_rate_tally.sparse = self.sparse
return self._rxn_rate_tally
@property
def xs_tally(self):
if self._xs_tally is None:
delayed_nu_fission_in = self.tallies['delayed-nu-fission-in']
# Remove coarse energy filter to keep it out of tally arithmetic
energy_filter = delayed_nu_fission_in.find_filter(
openmc.EnergyFilter)
delayed_nu_fission_in.remove_filter(energy_filter)
# Compute chi
self._xs_tally = self.rxn_rate_tally / delayed_nu_fission_in
super(ChiDelayed, self)._compute_xs()
# Add the coarse energy filter back to the nu-fission tally
delayed_nu_fission_in.filters.append(energy_filter)
return self._xs_tally
def get_homogenized_mgxs(self, other_mgxs):
"""Construct a homogenized MGXS with other MGXS objects.
This method constructs a new MGXS object that is the flux-weighted
combination of two MGXS objects. It is equivalent to what one would
obtain if the tally spatial domain were designed to encompass the
individual domains for both MGXS objects.
Parameters
----------
other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS
The MGXS to homogenize with this one.
Returns
-------
openmc.mgxs.MGXS
A new homogenized MGXS
Raises
------
ValueError
If the other_mgxs is of a different type.
"""
return self._get_homogenized_mgxs(other_mgxs, 'delayed-nu-fission-in')
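# Example (editor's sketch, not part of the original source): two ChiDelayed
# objects tallied over different fuel cells, whose tallies have already been
# loaded from a statepoint, could be combined into one flux-weighted spectrum:
#
#     chi_combined = chi_fuel_1.get_homogenized_mgxs(chi_fuel_2)
#
# Here `chi_fuel_1` and `chi_fuel_2` are hypothetical ChiDelayed instances.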
def get_slice(self, nuclides=[], groups=[], delayed_groups=[]):
"""Build a sliced ChiDelayed for the specified nuclides and energy
groups.
This method constructs a new MGXS to encapsulate a subset of the data
represented by this MGXS. The subset of data to include in the tally
slice is determined by the nuclides and energy groups specified in
the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U-235', 'U-238']; default is [])
groups : list of Integral
A list of energy group indices starting at 1 for the high energies
(e.g., [1, 2, 3]; default is [])
delayed_groups : list of int
A list of delayed group indices
(e.g., [1, 2, 3]; default is [])
Returns
-------
openmc.mgxs.MDGXS
A new MDGXS which encapsulates the subset of data requested
for the nuclide(s) and/or energy group(s) and/or delayed group(s)
requested in the parameters.
"""
# Temporarily remove energy filter from delayed-nu-fission-in since its
# group structure will work in super MGXS.get_slice(...) method
delayed_nu_fission_in = self.tallies['delayed-nu-fission-in']
energy_filter = delayed_nu_fission_in.find_filter(openmc.EnergyFilter)
delayed_nu_fission_in.remove_filter(energy_filter)
# Call super class method and null out derived tallies
slice_xs = super(ChiDelayed, self).get_slice(nuclides, groups,
delayed_groups)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice energy groups if needed
filters = []
filter_bins = []
if len(groups) != 0:
energy_bins = []
for group in groups:
group_bounds = self.energy_groups.get_group_bounds(group)
energy_bins.append(group_bounds)
filter_bins.append(tuple(energy_bins))
filters.append(openmc.EnergyoutFilter)
if len(delayed_groups) != 0:
filter_bins.append(tuple(delayed_groups))
filters.append(openmc.DelayedGroupFilter)
if filters != []:
# Slice nu-fission-out tally along energyout filter
delayed_nu_fission_out = slice_xs.tallies['delayed-nu-fission-out']
tally_slice = delayed_nu_fission_out.get_slice \
(filters=filters, filter_bins=filter_bins)
slice_xs._tallies['delayed-nu-fission-out'] = tally_slice
# Add energy filter back to nu-fission-in tallies
self.tallies['delayed-nu-fission-in'].add_filter(energy_filter)
slice_xs._tallies['delayed-nu-fission-in'].add_filter(energy_filter)
slice_xs.sparse = self.sparse
return slice_xs
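# Example (editor's sketch): a slice restricted to delayed group 1 and the two
# highest energy groups could be built from a hypothetical instance `chi_d`
# whose tally data has been loaded:
#
#     chi_slice = chi_d.get_slice(groups=[1, 2], delayed_groups=[1])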
def merge(self, other):
"""Merge another ChiDelayed with this one
If results have been loaded from a statepoint, then ChiDelayed are only
mergeable along one and only one of energy groups or nuclides.
Parameters
----------
other : openmc.mdgxs.MGXS
MGXS to merge with this one
Returns
-------
merged_mdgxs : openmc.mgxs.MDGXS
Merged MDGXS
"""
if not self.can_merge(other):
raise ValueError('Unable to merge ChiDelayed')
# Create deep copy of tally to return as merged tally
merged_mdgxs = copy.deepcopy(self)
merged_mdgxs._derived = True
merged_mdgxs._rxn_rate_tally = None
merged_mdgxs._xs_tally = None
# Merge energy groups
if self.energy_groups != other.energy_groups:
merged_groups = self.energy_groups.merge(other.energy_groups)
merged_mdgxs.energy_groups = merged_groups
# Merge delayed groups
if self.delayed_groups != other.delayed_groups:
merged_mdgxs.delayed_groups = list(set(self.delayed_groups +
other.delayed_groups))
# Merge nuclides
if self.nuclides != other.nuclides:
# The nuclides must be mutually exclusive
for nuclide in self.nuclides:
if nuclide in other.nuclides:
msg = 'Unable to merge Chi Delayed with shared nuclides'
raise ValueError(msg)
# Concatenate lists of nuclides for the merged MGXS
merged_mdgxs.nuclides = self.nuclides + other.nuclides
# Merge tallies
for tally_key in self.tallies:
merged_tally = self.tallies[tally_key].merge\
(other.tallies[tally_key])
merged_mdgxs.tallies[tally_key] = merged_tally
return merged_mdgxs
def get_xs(self, groups='all', subdomains='all', nuclides='all',
xs_type='macro', order_groups='increasing',
value='mean', delayed_groups='all', squeeze=True, **kwargs):
"""Returns an array of the delayed fission spectrum.
This method constructs a 4D NumPy array for the requested
multi-delayed-group cross section data for one or more
subdomains (1st dimension), delayed groups (2nd dimension),
energy groups (3rd dimension), and nuclides (4th dimension).
Parameters
----------
groups : Iterable of Integral or 'all'
Energy groups of interest. Defaults to 'all'.
delayed_groups : list of int or 'all'
Delayed groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U-235', 'U-238']). The
special string 'all' will return the cross sections for all nuclides
in the spatial domain. The special string 'sum' will return the
cross section summed over all nuclides. Defaults to 'all'.
xs_type: {'macro', 'micro'}
This parameter is not relevant for chi but is included here to
mirror the parent MGXS.get_xs(...) class method
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the multi-group and multi-delayed-group cross
section indexed in the order each group, subdomain and nuclide is
listed in the parameters.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# FIXME: Unable to get microscopic xs for mesh domain because the mesh
# cells do not know the nuclide densities in each mesh cell.
if self.domain_type == 'mesh' and xs_type == 'micro':
msg = 'Unable to get micro xs for mesh domain since the mesh ' \
'cells do not know the nuclide densities in each mesh cell.'
raise ValueError(msg)
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, string_types):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
for subdomain in subdomains:
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
filter_bins.append((subdomain,))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(groups, string_types):
cv.check_iterable_type('groups', groups, Integral)
for group in groups:
filters.append(openmc.EnergyoutFilter)
filter_bins.append(
(self.energy_groups.get_group_bounds(group),))
# Construct list of delayed group tuples for all requested groups
if not isinstance(delayed_groups, string_types):
cv.check_type('delayed groups', delayed_groups, list, int)
for delayed_group in delayed_groups:
filters.append(openmc.DelayedGroupFilter)
filter_bins.append((delayed_group,))
# If chi delayed was computed for each nuclide in the domain
if self.by_nuclide:
# Get the sum as the fission source weighted average chi for all
# nuclides in the domain
if nuclides == 'sum' or nuclides == ['sum']:
# Retrieve the fission production tallies
delayed_nu_fission_in = self.tallies['delayed-nu-fission-in']
delayed_nu_fission_out = self.tallies['delayed-nu-fission-out']
# Sum out all nuclides
nuclides = self.get_nuclides()
delayed_nu_fission_in = delayed_nu_fission_in.summation\
(nuclides=nuclides)
delayed_nu_fission_out = delayed_nu_fission_out.summation\
(nuclides=nuclides)
# Remove coarse energy filter to keep it out of tally arithmetic
energy_filter = delayed_nu_fission_in.find_filter(
openmc.EnergyFilter)
delayed_nu_fission_in.remove_filter(energy_filter)
# Compute chi and store it as the xs_tally attribute so we can
# use the generic get_xs(...) method
xs_tally = delayed_nu_fission_out / delayed_nu_fission_in
# Add the coarse energy filter back to the nu-fission tally
delayed_nu_fission_in.filters.append(energy_filter)
xs = xs_tally.get_values(filters=filters,
filter_bins=filter_bins, value=value)
# Get chi delayed for all nuclides in the domain
elif nuclides == 'all':
nuclides = self.get_nuclides()
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins,
nuclides=nuclides, value=value)
# Get chi delayed for user-specified nuclides in the domain
else:
cv.check_iterable_type('nuclides', nuclides, string_types)
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins,
nuclides=nuclides, value=value)
# If chi delayed was computed as an average of nuclides in the domain
else:
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins, value=value)
# Eliminate the trivial score dimension
xs = np.squeeze(xs, axis=len(xs.shape) - 1)
xs = np.nan_to_num(xs)
# Reshape tally data array with separate axes for domain and energy
if groups == 'all':
num_groups = self.num_groups
else:
num_groups = len(groups)
if delayed_groups == 'all':
num_delayed_groups = self.num_delayed_groups
else:
num_delayed_groups = len(delayed_groups)
# Reshape tally data array with separate axes for domain, energy
# groups, and accommodate the polar and azimuthal bins if needed
num_subdomains = int(xs.shape[0] / (num_delayed_groups *
num_groups * self.num_polar *
self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_delayed_groups, num_groups)
else:
new_shape = (num_subdomains, num_delayed_groups, num_groups)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[..., ::-1, :]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# and energy group data.
xs = self._squeeze_xs(xs)
return xs
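# Example (editor's sketch, not part of the original source): a typical
# ChiDelayed workflow, assuming a pre-defined openmc.Material `fuel`, a
# placeholder statepoint filename, and a coarse two-group structure (edges in eV):
#
#     groups = openmc.mgxs.EnergyGroups([0.0, 0.625, 20.0e6])
#     chi_d = ChiDelayed(domain=fuel, domain_type='material',
#                        energy_groups=groups,
#                        delayed_groups=list(range(1, 7)))
#     tallies = openmc.Tallies(chi_d.tallies.values())  # input generation
#
#     # ... run OpenMC, then post-process ...
#     sp = openmc.StatePoint('statepoint.100.h5')
#     chi_d.load_from_statepoint(sp)
#     xs = chi_d.get_xs(delayed_groups=[1], groups='all')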
class DelayedNuFissionXS(MDGXS):
r"""A fission delayed neutron production multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group fission neutron production cross sections for multi-group
neutronics calculations. At a minimum, one needs to set the
:attr:`DelayedNuFissionXS.energy_groups` and :attr:`DelayedNuFissionXS.domain`
properties. Tallies for the flux and appropriate reaction rates over the
specified domain are generated automatically via the
:attr:`DelayedNuFissionXS.tallies` property, which can then be appended to a
:class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`DelayedNuFissionXS.xs_tally` property.
For a spatial domain :math:`V`, energy group :math:`[E_g,E_{g-1}]`, and
delayed group :math:`d`, the fission delayed neutron production cross
section is calculated as:
.. math::
\frac{\int_{r \in V} dr \int_{4\pi} d\Omega \int_{E_g}^{E_{g-1}} dE \;
\nu^d \sigma_f (r, E) \psi (r, E, \Omega)}{\int_{r \in V} dr \int_{4\pi}
d\Omega \int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega)}.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.Mesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : Material or Cell or Universe or Mesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`DelayedNuFissionXS.tally_keys` property
and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U-238', 'O-16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, energy_groups=None,
delayed_groups=None, by_nuclide=False, name='',
num_polar=1, num_azimuthal=1):
super(DelayedNuFissionXS, self).__init__(domain, domain_type,
energy_groups, delayed_groups,
by_nuclide, name, num_polar,
num_azimuthal)
self._rxn_type = 'delayed-nu-fission'
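# Example (editor's sketch): DelayedNuFissionXS follows the same workflow as the
# other MDGXS classes. For a hypothetical universe `pin_universe` and a
# previously defined `groups` structure:
#
#     dnf = DelayedNuFissionXS(domain=pin_universe, domain_type='universe',
#                              energy_groups=groups,
#                              delayed_groups=[1, 2, 3])
#     tallies = openmc.Tallies(dnf.tallies.values())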
class Beta(MDGXS):
r"""The delayed neutron fraction.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group and multi-delayed group cross sections for multi-group
neutronics calculations. At a minimum, one needs to set the
:attr:`Beta.energy_groups` and :attr:`Beta.domain` properties. Tallies for
the flux and appropriate reaction rates over the specified domain are
generated automatically via the :attr:`Beta.tallies` property, which can
then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`Beta.xs_tally` property.
For a spatial domain :math:`V`, energy group :math:`[E_g,E_{g-1}]`, and
delayed group :math:`d`, the delayed neutron fraction is calculated as:
.. math::
\langle \nu^d \sigma_f \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
d\Omega' \int_0^\infty dE' \int_0^\infty dE \; \chi(E) \nu^d
\sigma_f (r, E') \psi(r, E', \Omega') \\
\langle \nu \sigma_f \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
d\Omega' \int_0^\infty dE' \int_0^\infty dE \; \chi(E) \nu
\sigma_f (r, E') \psi(r, E', \Omega') \\
\beta_{d,g} &= \frac{\langle \nu^d \sigma_f \phi \rangle}
{\langle \nu \sigma_f \phi \rangle}
NOTE: The Beta MGXS is the delayed neutron fraction computed directly from
the nuclear data. Often the delayed neutron fraction is
"importance-weighted" by the adjoint flux and called "beta-effective". It
is important to make clear that this Beta is not importance-weighted.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.Mesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : Material or Cell or Universe or Mesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`Beta.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U-238', 'O-16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, energy_groups=None,
delayed_groups=None, by_nuclide=False, name='',
num_polar=1, num_azimuthal=1):
super(Beta, self).__init__(domain, domain_type, energy_groups,
delayed_groups, by_nuclide, name, num_polar,
num_azimuthal)
self._rxn_type = 'beta'
@property
def scores(self):
return ['nu-fission', 'delayed-nu-fission']
@property
def tally_keys(self):
return ['nu-fission', 'delayed-nu-fission']
@property
def rxn_rate_tally(self):
if self._rxn_rate_tally is None:
self._rxn_rate_tally = self.tallies['delayed-nu-fission']
self._rxn_rate_tally.sparse = self.sparse
return self._rxn_rate_tally
@property
def xs_tally(self):
if self._xs_tally is None:
nu_fission = self.tallies['nu-fission']
# Compute beta
self._xs_tally = self.rxn_rate_tally / nu_fission
super(Beta, self)._compute_xs()
return self._xs_tally
def get_homogenized_mgxs(self, other_mgxs):
"""Construct a homogenized MGXS with other MGXS objects.
This method constructs a new MGXS object that is the flux-weighted
combination of two MGXS objects. It is equivalent to what one would
obtain if the tally spatial domain were designed to encompass the
individual domains for both MGXS objects.
Parameters
----------
other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS
The MGXS to homogenize with this one.
Returns
-------
openmc.mgxs.MGXS
A new homogenized MGXS
Raises
------
ValueError
If the other_mgxs is of a different type.
"""
return self._get_homogenized_mgxs(other_mgxs, 'nu-fission')
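# Example (editor's sketch, not in the original source): Beta gives the delayed
# neutron fraction per delayed group and energy group directly from the nuclear
# data (not importance-weighted). Assuming a hypothetical cell `core`, a group
# structure `groups`, and tally data already loaded from a statepoint:
#
#     beta = Beta(domain=core, domain_type='cell', energy_groups=groups,
#                 delayed_groups=list(range(1, 7)))
#     beta_1 = beta.get_xs(delayed_groups=[1])  # fraction for delayed group 1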
class DecayRate(MDGXS):
r"""The decay rate for delayed neutron precursors.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group and multi-delayed group cross sections for multi-group
neutronics calculations. At a minimum, one needs to set the
:attr:`DecayRate.energy_groups` and :attr:`DecayRate.domain` properties.
Tallies for the flux and appropriate reaction rates over the specified
domain are generated automatically via the :attr:`DecayRate.tallies`
property, which can then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`DecayRate.xs_tally` property.
For a spatial domain :math:`V`, energy group :math:`[E_g,E_{g-1}]`, and
delayed group :math:`d`, the decay rate is calculated as:
.. math::
\langle \lambda_d \nu^d \sigma_f \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega' \int_0^\infty dE' \int_0^\infty dE \; \lambda_d \nu^d
\sigma_f (r, E') \psi(r, E', \Omega') \\
\langle \nu^d \sigma_f \phi \rangle &= \int_{r \in V} dr \int_{4\pi}
d\Omega' \int_0^\infty dE' \int_0^\infty dE \; \chi(E) \nu^d
\sigma_f (r, E') \psi(r, E', \Omega') \\
\lambda_d &= \frac{\langle \lambda_d \nu^d \sigma_f \phi \rangle}
{\langle \nu^d \sigma_f \phi \rangle}
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.Mesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : Material or Cell or Universe or Mesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`DecayRate.tally_keys` property and
values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U-238', 'O-16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, energy_groups=None,
delayed_groups=None, by_nuclide=False, name='',
num_polar=1, num_azimuthal=1):
super(DecayRate, self).__init__(domain, domain_type, energy_groups,
delayed_groups, by_nuclide, name,
num_polar, num_azimuthal)
self._rxn_type = 'decay-rate'
@property
def scores(self):
return ['delayed-nu-fission', 'decay-rate']
@property
def tally_keys(self):
return ['delayed-nu-fission', 'decay-rate']
@property
def filters(self):
# Create the non-domain specific Filters for the Tallies
group_edges = self.energy_groups.group_edges
energy_filter = openmc.EnergyFilter(group_edges)
if self.delayed_groups is not None:
delayed_filter = openmc.DelayedGroupFilter(self.delayed_groups)
filters = [[delayed_filter, energy_filter], [delayed_filter,
energy_filter]]
else:
filters = [[energy_filter], [energy_filter]]
return self._add_angle_filters(filters)
@property
def xs_tally(self):
if self._xs_tally is None:
delayed_nu_fission = self.tallies['delayed-nu-fission']
# Compute the decay rate
self._xs_tally = self.rxn_rate_tally / delayed_nu_fission
super(DecayRate, self)._compute_xs()
return self._xs_tally
def get_homogenized_mgxs(self, other_mgxs):
"""Construct a homogenized MGXS with other MGXS objects.
This method constructs a new MGXS object that is the flux-weighted
combination of two MGXS objects. It is equivalent to what one would
obtain if the tally spatial domain were designed to encompass the
individual domains for both MGXS objects.
Parameters
----------
other_mgxs : openmc.mgxs.MGXS or Iterable of openmc.mgxs.MGXS
The MGXS to homogenize with this one.
Returns
-------
openmc.mgxs.MGXS
A new homogenized MGXS
Raises
------
ValueError
If the other_mgxs is of a different type.
"""
return self._get_homogenized_mgxs(other_mgxs, 'delayed-nu-fission')
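# Example (editor's sketch): DecayRate yields the flux-weighted precursor decay
# constants lambda_d. For a hypothetical material `fuel` and group structure
# `groups`, with tally data loaded from a statepoint, the six-group constants
# could be read as:
#
#     rates = DecayRate(domain=fuel, domain_type='material',
#                       energy_groups=groups,
#                       delayed_groups=list(range(1, 7)))
#     lambdas = rates.get_xs(delayed_groups=list(range(1, 7)))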
@add_metaclass(ABCMeta)
class MatrixMDGXS(MDGXS):
"""An abstract multi-delayed-group cross section for some energy group and
delayed group structure within some spatial domain. This class is
specifically intended for cross sections which depend on both the incoming
and outgoing energy groups and are therefore represented by matrices.
An example of this is the delayed-nu-fission matrix.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group and multi-delayed-group cross sections for downstream neutronics
calculations.
NOTE: Users should instantiate the subclasses of this abstract class.
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.Mesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : Material or Cell or Universe or Mesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : {'tracklength', 'collision', 'analog'}
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file) and the number of mesh cells for
'mesh' domain types.
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U238', 'O16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
@property
def _dont_squeeze(self):
"""Create a tuple of axes which should not be removed during the get_xs
process
"""
if self.num_polar > 1 or self.num_azimuthal > 1:
return (0, 1, 3, 4, 5)
else:
return (1, 2, 3)
@property
def filters(self):
# Create the non-domain specific Filters for the Tallies
group_edges = self.energy_groups.group_edges
energy = openmc.EnergyFilter(group_edges)
energyout = openmc.EnergyoutFilter(group_edges)
if self.delayed_groups is not None:
delayed = openmc.DelayedGroupFilter(self.delayed_groups)
filters = [[energy], [delayed, energy, energyout]]
else:
filters = [[energy], [energy, energyout]]
return self._add_angle_filters(filters)
def get_xs(self, in_groups='all', out_groups='all',
subdomains='all', nuclides='all',
xs_type='macro', order_groups='increasing',
row_column='inout', value='mean', delayed_groups='all',
squeeze=True, **kwargs):
"""Returns an array of multi-group cross sections.
This method constructs a 4D NumPy array for the requested
multi-group cross section data for one or more subdomains
(1st dimension), delayed groups (2nd dimension), energy groups in
(3rd dimension), energy groups out (4th dimension), and nuclides
(5th dimension).
Parameters
----------
in_groups : Iterable of Integral or 'all'
Incoming energy groups of interest. Defaults to 'all'.
out_groups : Iterable of Integral or 'all'
Outgoing energy groups of interest. Defaults to 'all'.
subdomains : Iterable of Integral or 'all'
Subdomain IDs of interest. Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
A list of nuclide name strings (e.g., ['U235', 'U238']). The
special string 'all' will return the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
return the cross section summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
order_groups: {'increasing', 'decreasing'}
Return the cross section indexed according to increasing or
decreasing energy groups (decreasing or increasing energies).
Defaults to 'increasing'.
row_column: {'inout', 'outin'}
Return the cross section indexed first by incoming group and
second by outgoing group ('inout'), or vice versa ('outin').
Defaults to 'inout'.
value : {'mean', 'std_dev', 'rel_err'}
A string for the type of value to return. Defaults to 'mean'.
delayed_groups : list of int or 'all'
Delayed groups of interest. Defaults to 'all'.
squeeze : bool
A boolean representing whether to eliminate the extra dimensions
of the multi-dimensional array to be returned. Defaults to True.
Returns
-------
numpy.ndarray
A NumPy array of the multi-group cross section indexed in the order
each group and subdomain is listed in the parameters.
Raises
------
ValueError
When this method is called before the multi-group cross section is
computed from tally data.
"""
cv.check_value('value', value, ['mean', 'std_dev', 'rel_err'])
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# FIXME: Unable to get microscopic xs for mesh domain because the mesh
# cells do not know the nuclide densities in each mesh cell.
if self.domain_type == 'mesh' and xs_type == 'micro':
msg = 'Unable to get micro xs for mesh domain since the mesh ' \
'cells do not know the nuclide densities in each mesh cell.'
raise ValueError(msg)
filters = []
filter_bins = []
# Construct a collection of the domain filter bins
if not isinstance(subdomains, string_types):
cv.check_iterable_type('subdomains', subdomains, Integral,
max_depth=3)
for subdomain in subdomains:
filters.append(_DOMAIN_TO_FILTER[self.domain_type])
filter_bins.append((subdomain,))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(in_groups, string_types):
cv.check_iterable_type('groups', in_groups, Integral)
for group in in_groups:
filters.append(openmc.EnergyFilter)
filter_bins.append((
self.energy_groups.get_group_bounds(group),))
# Construct list of energy group bounds tuples for all requested groups
if not isinstance(out_groups, string_types):
cv.check_iterable_type('groups', out_groups, Integral)
for group in out_groups:
filters.append(openmc.EnergyoutFilter)
filter_bins.append((
self.energy_groups.get_group_bounds(group),))
# Construct list of delayed group tuples for all requested groups
if not isinstance(delayed_groups, string_types):
cv.check_type('delayed groups', delayed_groups, list, int)
for delayed_group in delayed_groups:
filters.append(openmc.DelayedGroupFilter)
filter_bins.append((delayed_group,))
# Construct a collection of the nuclides to retrieve from the xs tally
if self.by_nuclide:
if nuclides == 'all' or nuclides == 'sum' or nuclides == ['sum']:
query_nuclides = self.get_nuclides()
else:
query_nuclides = nuclides
else:
query_nuclides = ['total']
# Use tally summation if user requested the sum for all nuclides
if nuclides == 'sum' or nuclides == ['sum']:
xs_tally = self.xs_tally.summation(nuclides=query_nuclides)
xs = xs_tally.get_values(filters=filters, filter_bins=filter_bins,
value=value)
else:
xs = self.xs_tally.get_values(filters=filters,
filter_bins=filter_bins,
nuclides=query_nuclides, value=value)
# Divide by atom number densities for microscopic cross sections
if xs_type == 'micro':
if self.by_nuclide:
densities = self.get_nuclide_densities(nuclides)
else:
densities = self.get_nuclide_densities('sum')
if value == 'mean' or value == 'std_dev':
xs /= densities[np.newaxis, :, np.newaxis]
# Eliminate the trivial score dimension
xs = np.squeeze(xs, axis=len(xs.shape) - 1)
xs = np.nan_to_num(xs)
if in_groups == 'all':
num_in_groups = self.num_groups
else:
num_in_groups = len(in_groups)
if out_groups == 'all':
num_out_groups = self.num_groups
else:
num_out_groups = len(out_groups)
if delayed_groups == 'all':
num_delayed_groups = self.num_delayed_groups
else:
num_delayed_groups = len(delayed_groups)
# Reshape tally data array with separate axes for domain and energy
# Accommodate the polar and azimuthal bins if needed
num_subdomains = int(xs.shape[0] / (num_delayed_groups *
num_in_groups * num_out_groups *
self.num_polar *
self.num_azimuthal))
if self.num_polar > 1 or self.num_azimuthal > 1:
new_shape = (self.num_polar, self.num_azimuthal, num_subdomains,
num_delayed_groups, num_in_groups, num_out_groups)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Transpose the matrix if requested by user
if row_column == 'outin':
xs = np.swapaxes(xs, 4, 5)
else:
new_shape = (num_subdomains, num_delayed_groups, num_in_groups,
num_out_groups)
new_shape += xs.shape[1:]
xs = np.reshape(xs, new_shape)
# Transpose the matrix if requested by user
if row_column == 'outin':
xs = np.swapaxes(xs, 2, 3)
# Reverse data if user requested increasing energy groups since
# tally data is stored in order of increasing energies
if order_groups == 'increasing':
xs = xs[..., ::-1, ::-1, :]
if squeeze:
# We want to squeeze out everything but the polar, azimuthal,
# and in/out energy group data.
xs = self._squeeze_xs(xs)
return xs
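# Example (editor's note): with squeeze=False and no polar/azimuthal binning,
# the array returned above is indexed as
# xs[subdomain, delayed_group, group_in, group_out, nuclide], so a hypothetical
# delayed-group-1 transfer from energy group 1 into group 2 would be:
#
#     value = xs[0, 0, 0, 1, 0]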
def get_slice(self, nuclides=[], in_groups=[], out_groups=[],
delayed_groups=[]):
"""Build a sliced MatrixMDGXS object for the specified nuclides and
energy groups.
This method constructs a new MDGXS to encapsulate a subset of the data
represented by this MDGXS. The subset of data to include in the tally
slice is determined by the nuclides, energy groups, and delayed groups
specified in the input parameters.
Parameters
----------
nuclides : list of str
A list of nuclide name strings
(e.g., ['U235', 'U238']; default is [])
in_groups : list of int
A list of incoming energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
out_groups : list of int
A list of outgoing energy group indices starting at 1 for the high
energies (e.g., [1, 2, 3]; default is [])
delayed_groups : list of int
A list of delayed group indices
(e.g., [1, 2, 3]; default is [])
Returns
-------
openmc.mgxs.MatrixMDGXS
A new MatrixMDGXS object which encapsulates the subset of data
requested for the nuclide(s) and/or energy group(s) requested in
the parameters.
"""
# Call super class method and null out derived tallies
slice_xs = super(MatrixMDGXS, self).get_slice(nuclides, in_groups,
delayed_groups)
slice_xs._rxn_rate_tally = None
slice_xs._xs_tally = None
# Slice outgoing energy groups if needed
if len(out_groups) != 0:
filter_bins = []
for group in out_groups:
group_bounds = self.energy_groups.get_group_bounds(group)
filter_bins.append(group_bounds)
filter_bins = [tuple(filter_bins)]
# Slice each of the tallies across energyout groups
for tally_type, tally in slice_xs.tallies.items():
if tally.contains_filter(openmc.EnergyoutFilter):
tally_slice = tally.get_slice(
filters=[openmc.EnergyoutFilter],
filter_bins=filter_bins)
slice_xs.tallies[tally_type] = tally_slice
slice_xs.sparse = self.sparse
return slice_xs
def print_xs(self, subdomains='all', nuclides='all', xs_type='macro'):
"""Prints a string representation for the multi-group cross section.
Parameters
----------
subdomains : Iterable of Integral or 'all'
The subdomain IDs of the cross sections to include in the report.
Defaults to 'all'.
nuclides : Iterable of str or 'all' or 'sum'
The nuclides of the cross-sections to include in the report. This
may be a list of nuclide name strings (e.g., ['U235', 'U238']).
The special string 'all' will report the cross sections for all
nuclides in the spatial domain. The special string 'sum' will
report the cross sections summed over all nuclides. Defaults to
'all'.
xs_type: {'macro', 'micro'}
Return the macro or micro cross section in units of cm^-1 or barns.
Defaults to 'macro'.
"""
# Construct a collection of the subdomains to report
if not isinstance(subdomains, string_types):
cv.check_iterable_type('subdomains', subdomains, Integral)
elif self.domain_type == 'distribcell':
subdomains = np.arange(self.num_subdomains, dtype=int)
elif self.domain_type == 'mesh':
xyz = [range(1, x + 1) for x in self.domain.dimension]
subdomains = list(itertools.product(*xyz))
else:
subdomains = [self.domain.id]
# Construct a collection of the nuclides to report
if self.by_nuclide:
if nuclides == 'all':
nuclides = self.get_nuclides()
if nuclides == 'sum':
nuclides = ['sum']
else:
cv.check_iterable_type('nuclides', nuclides, string_types)
else:
nuclides = ['sum']
cv.check_value('xs_type', xs_type, ['macro', 'micro'])
# Build header for string with type and domain info
string = 'Multi-Delayed-Group XS\n'
string += '{0: <16}=\t{1}\n'.format('\tReaction Type', self.rxn_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain Type', self.domain_type)
string += '{0: <16}=\t{1}\n'.format('\tDomain ID', self.domain.id)
# Generate the header for an individual XS
xs_header = '\tCross Sections [{0}]:'.format(self.get_units(xs_type))
# If cross section data has not been computed, only print string header
if self.tallies is None:
print(string)
return
string += '{0: <16}\n'.format('\tEnergy Groups:')
template = '{0: <12}Group {1} [{2: <10} - {3: <10}eV]\n'
# Loop over energy groups ranges
for group in range(1, self.num_groups + 1):
bounds = self.energy_groups.get_group_bounds(group)
string += template.format('', group, bounds[0], bounds[1])
# Set polar and azimuthal bins if necessary
if self.num_polar > 1 or self.num_azimuthal > 1:
pol_bins = np.linspace(0., np.pi, num=self.num_polar + 1,
endpoint=True)
azi_bins = np.linspace(-np.pi, np.pi, num=self.num_azimuthal + 1,
endpoint=True)
# Loop over all subdomains
for subdomain in subdomains:
if self.domain_type == 'distribcell':
string += '{: <16}=\t{}\n'.format('\tSubdomain', subdomain)
# Loop over all Nuclides
for nuclide in nuclides:
# Build header for nuclide type
if nuclide != 'sum':
string += '{: <16}=\t{}\n'.format('\tNuclide', nuclide)
# Build header for cross section type
string += '{: <16}\n'.format(xs_header)
if self.delayed_groups is not None:
for delayed_group in self.delayed_groups:
template = '{0: <12}Delayed Group {1}:\t'
string += template.format('', delayed_group)
string += '\n'
template = '{0: <12}Group {1} -> Group {2}:\t\t'
average_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='mean',
delayed_groups=[delayed_group])
rel_err_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type,
value='rel_err',
delayed_groups=[delayed_group])
rel_err_xs = rel_err_xs * 100.
if self.num_polar > 1 or self.num_azimuthal > 1:
# Loop over polar, azi, and in/out group ranges
for pol in range(len(pol_bins) - 1):
pol_low, pol_high = pol_bins[pol: pol + 2]
for azi in range(len(azi_bins) - 1):
azi_low, azi_high = azi_bins[azi: azi + 2]
string += '\t\tPolar Angle: [{0:5f} - {1:5f}]'.format(
pol_low, pol_high) + \
'\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(
azi_low, azi_high) + '\n'
for in_group in range(1, self.num_groups + 1):
for out_group in range(1, self.num_groups + 1):
string += '\t' + template.format(
'', in_group, out_group)
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[pol, azi, in_group - 1,
out_group - 1],
rel_err_xs[pol, azi, in_group - 1,
out_group - 1])
string += '\n'
string += '\n'
string += '\n'
else:
# Loop over incoming/outgoing energy groups ranges
for in_group in range(1, self.num_groups + 1):
for out_group in range(1, self.num_groups + 1):
string += template.format(
'', in_group, out_group)
string += '{:.2e} +/- {:.2e}%'.format(
average_xs[in_group-1, out_group-1],
rel_err_xs[in_group-1, out_group-1])
string += '\n'
string += '\n'
string += '\n'
else:
template = '{0: <12}Group {1} -> Group {2}:\t\t'
average_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='mean')
rel_err_xs = self.get_xs(nuclides=[nuclide],
subdomains=[subdomain],
xs_type=xs_type, value='rel_err')
rel_err_xs = rel_err_xs * 100.
if self.num_polar > 1 or self.num_azimuthal > 1:
# Loop over polar, azi, and in/out energy group ranges
for pol in range(len(pol_bins) - 1):
pol_low, pol_high = pol_bins[pol: pol + 2]
for azi in range(len(azi_bins) - 1):
azi_low, azi_high = azi_bins[azi: azi + 2]
string += '\t\tPolar Angle: [{0:5f} - {1:5f}]'.format(
pol_low, pol_high) + \
'\tAzimuthal Angle: [{0:5f} - {1:5f}]'.format(
azi_low, azi_high) + '\n'
for in_group in range(1, self.num_groups + 1):
for out_group in range(1, self.num_groups + 1):
string += '\t' + template.format(
'', in_group, out_group)
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[pol, azi, in_group - 1,
out_group - 1],
rel_err_xs[pol, azi, in_group - 1,
out_group - 1])
string += '\n'
string += '\n'
string += '\n'
else:
# Loop over incoming/outgoing energy groups ranges
for in_group in range(1, self.num_groups + 1):
for out_group in range(1, self.num_groups + 1):
string += template.format('', in_group,
out_group)
string += '{0:.2e} +/- {1:.2e}%'.format(
average_xs[in_group - 1, out_group - 1],
rel_err_xs[in_group - 1, out_group - 1])
string += '\n'
string += '\n'
string += '\n'
string += '\n'
string += '\n'
print(string)
class DelayedNuFissionMatrixXS(MatrixMDGXS):
r"""A fission delayed neutron production matrix multi-group cross section.
This class can be used for both OpenMC input generation and tally data
post-processing to compute spatially-homogenized and energy-integrated
multi-group fission neutron production cross sections for multi-group
neutronics calculations. At a minimum, one needs to set the
:attr:`DelayedNuFissionMatrixXS.energy_groups` and
:attr:`DelayedNuFissionMatrixXS.domain` properties. Tallies for the flux and
appropriate reaction rates over the specified domain are generated
automatically via the :attr:`DelayedNuFissionMatrixXS.tallies` property,
which can then be appended to a :class:`openmc.Tallies` instance.
For post-processing, the :meth:`MGXS.load_from_statepoint` will pull in the
necessary data to compute multi-group cross sections from a
:class:`openmc.StatePoint` instance. The derived multi-group cross section
can then be obtained from the :attr:`DelayedNuFissionMatrixXS.xs_tally`
property.
For a spatial domain :math:`V`, energy group :math:`[E_g,E_{g-1}]`, and
delayed group :math:`d`, the fission delayed neutron production cross
section is calculated as:
.. math::
\langle \nu^d \sigma_{f,g'\rightarrow g} \phi \rangle &= \int_{r \in V} dr
\int_{4\pi} d\Omega' \int_{E_{g'}}^{E_{g'-1}} dE' \int_{E_g}^{E_{g-1}} dE
\; \chi(E) \nu^d \sigma_f (r, E') \psi(r, E', \Omega')\\
\langle \phi \rangle &= \int_{r \in V} dr \int_{4\pi} d\Omega
\int_{E_g}^{E_{g-1}} dE \; \psi (r, E, \Omega) \\
\nu^d \sigma_{f,g'\rightarrow g} &= \frac{\langle \nu^d \sigma_{f,g'\rightarrow g}
\phi \rangle}{\langle \phi \rangle}
Parameters
----------
domain : openmc.Material or openmc.Cell or openmc.Universe or openmc.Mesh
The domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
The domain type for spatial homogenization
groups : openmc.mgxs.EnergyGroups
The energy group structure for energy condensation
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
name : str, optional
Name of the multi-group cross section. Used as a label to identify
tallies in OpenMC 'tallies.xml' file.
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral, optional
Number of equi-width polar angle bins for angle discretization;
defaults to one bin
num_azimuthal : Integral, optional
Number of equi-width azimuthal angle bins for angle discretization;
defaults to one bin
Attributes
----------
name : str, optional
Name of the multi-group cross section
rxn_type : str
Reaction type (e.g., 'total', 'nu-fission', etc.)
by_nuclide : bool
If true, computes cross sections for each nuclide in domain
domain : Material or Cell or Universe or Mesh
Domain for spatial homogenization
domain_type : {'material', 'cell', 'distribcell', 'universe', 'mesh'}
Domain type for spatial homogenization
energy_groups : openmc.mgxs.EnergyGroups
Energy group structure for energy condensation
delayed_groups : list of int
Delayed groups to filter out the xs
num_polar : Integral
Number of equi-width polar angle bins for angle discretization
num_azimuthal : Integral
Number of equi-width azimuthal angle bins for angle discretization
tally_trigger : openmc.Trigger
An (optional) tally precision trigger given to each tally used to
compute the cross section
scores : list of str
The scores in each tally used to compute the multi-group cross section
filters : list of openmc.Filter
The filters in each tally used to compute the multi-group cross section
tally_keys : list of str
The keys into the tallies dictionary for each tally used to compute
the multi-group cross section
estimator : 'analog'
The tally estimator used to compute the multi-group cross section
tallies : collections.OrderedDict
OpenMC tallies needed to compute the multi-group cross section. The keys
are strings listed in the :attr:`DelayedNuFissionXS.tally_keys` property
and values are instances of :class:`openmc.Tally`.
rxn_rate_tally : openmc.Tally
Derived tally for the reaction rate tally used in the numerator to
compute the multi-group cross section. This attribute is None
unless the multi-group cross section has been computed.
xs_tally : openmc.Tally
Derived tally for the multi-group cross section. This attribute
is None unless the multi-group cross section has been computed.
num_subdomains : int
The number of subdomains is unity for 'material', 'cell' and 'universe'
domain types. This is equal to the number of cell instances
for 'distribcell' domain types (it is equal to unity prior to loading
tally data from a statepoint file).
num_nuclides : int
The number of nuclides for which the multi-group cross section is
being tracked. This is unity if the by_nuclide attribute is False.
nuclides : Iterable of str or 'sum'
The optional user-specified nuclides for which to compute cross
sections (e.g., 'U-238', 'O-16'). If by_nuclide is True but nuclides
are not specified by the user, all nuclides in the spatial domain
are included. This attribute is 'sum' if by_nuclide is false.
sparse : bool
Whether or not the MGXS' tallies use SciPy's LIL sparse matrix format
for compressed data storage
loaded_sp : bool
Whether or not a statepoint file has been loaded with tally data
derived : bool
Whether or not the MGXS is merged from one or more other MGXS
hdf5_key : str
The key used to index multi-group cross sections in an HDF5 data store
"""
def __init__(self, domain=None, domain_type=None, energy_groups=None,
delayed_groups=None, by_nuclide=False, name='',
num_polar=1, num_azimuthal=1):
super(DelayedNuFissionMatrixXS, self).__init__(domain, domain_type,
energy_groups,
delayed_groups,
by_nuclide, name,
num_polar,
num_azimuthal)
self._rxn_type = 'delayed-nu-fission'
self._hdf5_key = 'delayed-nu-fission matrix'
self._estimator = 'analog'
self._valid_estimators = ['analog']
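# A minimal usage sketch, only exercising the constructor documented above.
# The cell object, group structure, and delayed-group list are placeholder
# assumptions, not values taken from any particular OpenMC model.
def _example_delayed_nu_fission_matrix(fuel_cell, group_structure):
    """Build a cell-homogenized delayed nu-fission matrix MGXS from assumed inputs."""
    return DelayedNuFissionMatrixXS(
        domain=fuel_cell,                   # e.g. an openmc.Cell from the model
        domain_type='cell',
        energy_groups=group_structure,      # an openmc.mgxs.EnergyGroups instance
        delayed_groups=list(range(1, 7)),   # six delayed groups (assumption)
        by_nuclide=False,
        name='fuel delayed nu-fission matrix')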
| mit |
severinson/coded-computing-tools | rateless.py | 2 | 15846 | '''Optimize rateless codes for distributed computing
'''
import math
import random
import logging
import numpy as np
import pandas as pd
import pyrateless
import stats
import complexity
import overhead
import pynumeric
import tempfile
import subprocess
from os import path
from multiprocessing import Pool
def optimize_lt_parameters(num_inputs=None, target_overhead=None,
target_failure_probability=None):
'''find good lt code parameters
returns: a tuple (c, delta, mode)
'''
c, delta = pyrateless.heuristic(
num_inputs=num_inputs,
target_failure_probability=target_failure_probability,
target_overhead=target_overhead,
)
# compute the robust Soliton distribution mode
mode = pyrateless.coding.stats.mode_from_delta_c(
num_inputs=num_inputs,
delta=delta,
c=c,
)
return c, delta, mode
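# A minimal sketch of how the tuple returned above is typically consumed: the
# heuristic (c, delta, mode) parameterizes a robust Soliton distribution whose
# mean degree drives the complexity estimates below. The numbers used here
# (1000 inputs, 10% target overhead, 1e-2 failure probability) are assumptions
# chosen only for illustration.
def _example_soliton_from_heuristic(num_inputs=1000):
    c, delta, mode = optimize_lt_parameters(
        num_inputs=num_inputs,
        target_overhead=1.1,
        target_failure_probability=1e-2,
    )
    soliton = pyrateless.Soliton(symbols=num_inputs, mode=mode, failure_prob=delta)
    return c, delta, mode, soliton.mean()  # mean() is the average output-symbol degree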
def lt_encoding_complexity(num_inputs=None, failure_prob=None,
target_overhead=None, code_rate=None):
'''Return the encoding complexity of LT codes. Computed from the
average of the degree distribution.
The number of columns is assumed to be 1. Scale the return value
of this function by the actual number of columns to get the
correct complexity.
'''
# find good LT code parameters
if num_inputs == 2:
mode = 2
delta = 0.9999999701976676
else:
c, delta, mode = optimize_lt_parameters(
num_inputs=num_inputs,
target_overhead=target_overhead,
target_failure_probability=failure_prob,
)
avg_degree = pyrateless.Soliton(
delta=delta,
mode=mode,
symbols=num_inputs).mean()
encoding_complexity = pyrateless.optimize.complexity.encoding_additions(
avg_degree,
code_rate,
num_inputs,
1, # number of columns
) * complexity.ADDITION_COMPLEXITY
encoding_complexity += pyrateless.optimize.complexity.encoding_multiplications(
avg_degree,
code_rate,
num_inputs,
1,
) * complexity.MULTIPLICATION_COMPLEXITY
return encoding_complexity
def lt_decoding_complexity(num_inputs=None, failure_prob=None,
target_overhead=None):
'''Return the decoding complexity of LT codes. Data is manually
entered from simulations carried out using
https://github.com/severinson/RaptorCodes
'''
# simulated decoding-complexity results are stored in CSV files, one per
# target failure probability; each row is keyed by (num_inputs, overhead)
# and holds the row addition/multiplication counts from the decoder.
if failure_prob == 1e-1:
filename = './results/LT_1e-1.csv'
elif failure_prob == 1e-3:
filename = './results/LT_1e-3.csv'
elif failure_prob == 1e-6:
filename = './results/LT_1e-6.csv'
elif failure_prob == 1e-9:
filename = './results/LT_1e-9.csv'
else:
logging.error('no results for tfp={}'.format(failure_prob))
return math.inf
try:
df = pd.read_csv(filename)
except:
logging.error('could not load file {}.'.format(filename))
return math.inf
overhead = round(num_inputs*(target_overhead-1))
df = df.loc[df['num_inputs'] == num_inputs]
df = df.loc[df['overhead'] == overhead]
if len(df) != 1:
logging.warning(
'did not find exactly 1 row for num_inputs={}, failure_prob={}, overhead={} symbols:\n{}'.format(
num_inputs, failure_prob, overhead, df))
return math.inf
a = df['diagonalize_decoding_additions']
a += df['diagonalize_rowadds']
a += df['solve_dense_decoding_additions']
a += df['solve_dense_rowadds']
a += df['backsolve_decoding_additions']
a += df['backsolve_rowadds']
a = a.values[0]
m = df['diagonalize_decoding_multiplications']
m += df['diagonalize_rowmuls']
m += df['solve_dense_decoding_multiplications']
m += df['solve_dense_rowmuls']
m += df['backsolve_decoding_multiplications']
m += df['backsolve_rowmuls']
m = m.values[0]
return a*complexity.ADDITION_COMPLEXITY + m*complexity.MULTIPLICATION_COMPLEXITY
def evaluate(parameters, target_overhead=None,
target_failure_probability=None,
pdf_fun=None, partitioned=False,
cachedir=None):
'''evaluate LT code performance.
args:
parameters: system parameters.
pdf_fun: see rateless.performance_integral
partitioned: evaluate the performance of the scheme using a partitioned LT
code with rows_per_batch number of partitions. this case is easy to
evaluate as we will always receive the same coded symbols for each
partition. in particular, if it is possible to decode one partition, we can
decode all others as well. this is only true for
num_partitions=rows_per_batch.
returns: dict with performance results.
'''
assert target_overhead > 1
assert 0 < target_failure_probability < 1
assert isinstance(partitioned, bool)
# we support only either no partitioning or exactly rows_per_batch
# partitions. this case is much simpler to handle due to all partitions
# behaving the same only in this instance.
if partitioned:
num_partitions = parameters.rows_per_batch
else:
num_partitions = 1
# guaranteed to be an integer
num_inputs = int(parameters.num_source_rows / num_partitions)
# compute encoding complexity
encoding_complexity = lt_encoding_complexity(
num_inputs=num_inputs,
failure_prob=target_failure_probability,
target_overhead=target_overhead,
code_rate=parameters.q/parameters.num_servers,
)
encoding_complexity *= parameters.num_columns
encoding_complexity *= num_partitions
encoding_complexity *= parameters.muq
# compute decoding complexity
decoding_complexity = lt_decoding_complexity(
num_inputs=num_inputs,
failure_prob=target_failure_probability,
target_overhead=target_overhead,
)
decoding_complexity *= num_partitions
decoding_complexity *= parameters.num_outputs
# find good code parameters
if num_inputs == 2:
mode = 2
delta = 0.9999999701976676
else:
c, delta, mode = optimize_lt_parameters(
num_inputs=num_inputs,
target_overhead=target_overhead,
target_failure_probability=target_failure_probability,
)
logging.debug(
'LT mode=%d, delta=%f for %d input symbols, target overhead %f, target failure probability %f. partitioned: %r',
mode, delta, parameters.num_source_rows,
target_overhead, target_failure_probability,
partitioned,
)
# scale the number of multiplications required for encoding/decoding and
# store in a new dict.
result = dict()
# compute encoding delay
result['encode'] = stats.order_mean_shiftexp(
parameters.num_servers,
parameters.num_servers,
parameter=encoding_complexity / parameters.num_servers,
)
# compute decoding delay
result['reduce'] = stats.order_mean_shiftexp(
parameters.q,
parameters.q,
parameter=decoding_complexity / parameters.q,
)
# simulate the map phase load/delay. this simulation takes into account the
# probability of decoding at various levels of overhead.
simulated = performance_integral(
parameters=parameters,
num_inputs=num_inputs,
target_overhead=target_overhead,
mode=mode,
delta=delta,
pdf_fun=pdf_fun,
cachedir=cachedir,
)
result['delay'] = simulated['delay']
result['load'] = simulated['load']
return result
def lt_success_pdf(overhead_levels, num_inputs=None, mode=None, delta=None):
'''evaluate the decoding probability pdf.
args:
overhead_levels: iterable of overhead levels to evaluate the PDF at.
num_inputs: number of input symbols.
returns: a vector of the same length as overhead_levels, where the i-th element
is the probability of decoding at an overhead of overhead_levels[i].
'''
# create a distribution object. this is needed for the decoding success
# probability estimate.
soliton = pyrateless.Soliton(
symbols=num_inputs,
mode=mode,
failure_prob=delta,
)
# compute the probability of decoding at discrete levels of overhead. the
# first element is zero to make the pdf sum to 1.
decoding_cdf = np.fromiter(
[0] + [1-pyrateless.optimize.decoding_failure_prob_estimate(
soliton=soliton,
num_inputs=num_inputs,
overhead=x) for x in overhead_levels
], dtype=float)
# differentiate the CDF to obtain the PDF
decoding_pdf = np.diff(decoding_cdf)
return decoding_pdf
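# A small usage sketch: evaluate the decoding PDF on an overhead grid and sum it
# to get the probability of decoding by the largest overhead on the grid. The
# grid end points and the mode/delta values are illustrative assumptions only.
def _example_lt_success_pdf(num_inputs=1000, mode=998, delta=0.9):
    overhead_levels = np.linspace(1.0, 1.5, 50)
    pdf = lt_success_pdf(overhead_levels, num_inputs=num_inputs, mode=mode, delta=delta)
    return pdf, pdf.sum()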
def lt_success_samples(n, target_overhead=None, num_inputs=None, mode=None, delta=None):
'''sample the decoding probability distribution.
'''
assert n > 0
assert n % 1 == 0
if target_overhead is None:
target_overhead = 1
# create a distribution object. this is needed for the decoding success
# probability estimate.
soliton = pyrateless.Soliton(
symbols=num_inputs,
mode=mode,
failure_prob=delta,
)
cdf = lambda x: 1-pyrateless.optimize.decoding_failure_prob_estimate(
soliton=soliton,
num_inputs=num_inputs,
overhead=x,
)
# with Pool(processes=12) as pool:
samples = np.fromiter((
pynumeric.cnuminv(
fun=cdf,
target=random.random(),
lower=target_overhead,
) for _ in range(n)), dtype=float)
return np.maximum(samples, target_overhead)
def random_fountain_success_pdf(overhead_levels, field_size=2, num_inputs=None, mode=None, delta=None):
'''compute the decoding success probability PDF of a random fountain code over
a field of size field_size.
'''
assert field_size % 1 == 0, field_size
absolute_overhead = np.fromiter(
(num_inputs*(x-1) for x in overhead_levels),
dtype=float,
).round()
if absolute_overhead.min() < 0:
raise ValueError("error for overhead levels {}. overhead must be >=1.".format(overhead_levels))
decoding_cdf = 1-np.power(field_size, -absolute_overhead)
decoding_pdf = np.zeros(len(decoding_cdf))
decoding_pdf[1:] = np.diff(decoding_cdf)
decoding_pdf[0] = decoding_cdf[0]
return decoding_pdf
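# A quick numeric check of the closed form used above: a random fountain over a
# field of size q decodes with probability 1 - q**(-a) after a symbols of
# absolute overhead, so a handful of extra symbols already makes failure rare.
# The field size and overhead range below are arbitrary illustration values.
def _example_random_fountain_success(field_size=2):
    extra_symbols = np.arange(0, 11)
    return 1.0 - np.power(float(field_size), -extra_symbols)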
def performance_integral(parameters=None, num_inputs=None, target_overhead=None,
mode=None, delta=None, pdf_fun=None, num_overhead_levels=100,
max_overhead=None, cachedir=None):
'''compute average performance by taking into account the probability of
finishing at different levels of overhead.
pdf_fun: function used to evaluate the decoding success probability.
defaults to rateless.lt_success_pdf if None. a function given here
must have the same signature as this function.
num_overhead_levels: performance is evaluated at num_overhead_levels levels
of overhead between target_overhead and the maximum possible overhead.
'''
if pdf_fun is None:
pdf_fun = lt_success_pdf
assert callable(pdf_fun)
# get the max possible overhead
if max_overhead is None:
max_overhead = parameters.num_coded_rows / parameters.num_source_rows
if max_overhead < target_overhead:
raise ValueError("target overhead may not exceed the inverse of the code rate")
# evaluate the performance at various levels of overhead
overhead_levels = np.linspace(target_overhead, max_overhead, num_overhead_levels)
# compute the probability of decoding at the respective levels of overhead
decoding_probabilities = pdf_fun(
overhead_levels,
num_inputs=num_inputs,
mode=mode,
delta=delta,
)
# compute load/delay at the levels of overhead
results = list()
for overhead_level, decoding_probability in zip(overhead_levels, decoding_probabilities):
# monte carlo simulation of the load/delay at this overhead
df = overhead.performance_from_overhead(
parameters=parameters,
overhead=overhead_level,
design_overhead=target_overhead,
cachedir=cachedir,
)
# average the columns of the df
result = {label:df[label].mean() for label in df}
# multiply by the probability of decoding at this overhead level
for label in result:
result[label] *= decoding_probability
results.append(result)
# create a dataframe and sum along the columns
df = pd.DataFrame(results)
return {label:df[label].sum() for label in df}
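# The averaging above is a probability-weighted sum over overhead levels. A
# stripped-down sketch with made-up numbers: three overhead levels reached with
# probabilities 0.7, 0.2 and 0.1, each with its own simulated delay, combine
# into a single expected delay.
def _example_probability_weighted_delay():
    decoding_probabilities = np.array([0.7, 0.2, 0.1])
    delays = np.array([10.0, 12.0, 15.0])  # hypothetical per-overhead delays
    return np.dot(decoding_probabilities, delays)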
def order_pdf(parameters=None,
target_overhead=None,
target_failure_probability=None,
partitioned=False,
num_overhead_levels=100,
num_samples=100000,
cachedir=None):
'''simulate the order PDF, i.e., the PDF over the number of servers needed to
decode successfully.
num_samples: total number of samples to take of the number of servers
needed. the PDF is inferred from all samples.
returns: two arrays (order_values, order_probabilities) with the possible
number of servers needed and the probability of needing that number of
servers, respectively.
'''
# we support only either no partitioning or exactly rows_per_batch
# partitions. this case is much simpler to handle due to all partitions
# behaving the same only in this instance.
if partitioned:
num_partitions = parameters.rows_per_batch
else:
num_partitions = 1
# guaranteed to be an integer
num_inputs = int(round(parameters.num_source_rows / num_partitions))
# find good LT code parameters
c, delta, mode = optimize_lt_parameters(
num_inputs=num_inputs,
target_overhead=target_overhead,
target_failure_probability=target_failure_probability,
)
# get the max possible overhead
max_overhead = parameters.num_coded_rows / parameters.num_source_rows
# evaluate the performance at various levels of overhead
overhead_levels = np.linspace(target_overhead, max_overhead, num_overhead_levels)
# compute the probability of decoding at the respective levels of overhead
decoding_probabilities = lt_success_pdf(
overhead_levels,
num_inputs=num_inputs,
mode=mode,
delta=delta,
)
# simulate the number of servers needed at each level of overhead. the
# number of samples taken is weighted by the probability of needing this
# overhead.
results = list()
for overhead_level, decoding_probability in zip(overhead_levels, decoding_probabilities):
# the number of samples correspond to the probability of decoding at
# this level of overhead.
overhead_samples = int(round(decoding_probability * num_samples))
# monte carlo simulation of the load/delay at this overhead
df = overhead.performance_from_overhead(
parameters=parameters,
overhead=overhead_level,
design_overhead=target_overhead,
num_samples=overhead_samples,
cachedir=cachedir,
)
results.append(df)
# concatenate all samples into a single dataframe
samples = pd.concat(results, ignore_index=True)
print('rateless samples', samples)
# compute the empirical order pdf and return
order_count = samples['servers'].value_counts(normalize=True)
order_count.sort_index(inplace=True)
order_values = np.array(order_count.index)
order_probabilities = order_count.values
return order_values, order_probabilities
| apache-2.0 |
jwlawson/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 6 | 10430 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
local_filename = os.path.join(gettempdir(), filename)
if not os.path.exists(local_filename):
local_filename, _ = urllib.request.urlretrieve(url + filename,
local_filename)
statinfo = os.stat(local_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception('Failed to verify ' + local_filename +
'. Can you get to it with a browser?')
return local_filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
vocabulary = read_data(filename)
print('Data size', len(vocabulary))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0: # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# Filling 4 global variables:
# data - list of codes (integers from 0 to vocabulary_size-1).
# This is the original text but words are replaced by their codes
# count - map of words(strings) to count of occurrences
# dictionary - map of words(strings) to their codes(integers)
# reverse_dictionary - maps codes(integers) to words(strings)
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
if data_index + span > len(data):
data_index = 0
buffer.extend(data[data_index:data_index + span])
data_index += span
for i in range(batch_size // num_skips):
context_words = [w for w in range(span) if w != skip_window]
words_to_use = random.sample(context_words, num_skips)
for j, context_word in enumerate(words_to_use):
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[context_word]
if data_index == len(data):
buffer.extend(data[0:span])
data_index = span
else:
buffer.append(data[data_index])
data_index += 1
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent. These 3 variables are used only for
# displaying model accuracy, they don't affect calculation.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
# Explanation of the meaning of NCE loss:
# http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
# pylint: disable=g-import-not-at-top
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))
except ImportError as ex:
print('Please install sklearn, matplotlib, and scipy to show embeddings.')
print(ex)
| apache-2.0 |
jpinsonault/android_sensor_logger | python_scripts/cluster_light.py | 1 | 2894 | import numpy as np
import argparse
from pprint import pprint
from sklearn import mixture
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn import decomposition
from LogEntry import LogEntry
from LogEntry import db
from datetime import datetime
from matplotlib.dates import DayLocator, HourLocator, DateFormatter, drange
from numpy import arange
args = None
format_string = '%H:%M:%S %m/%d/%Y'
def parse_args():
global args
parser = argparse.ArgumentParser()
args = parser.parse_args()
def main():
g = mixture.GMM(n_components=4)
log_entries = load_light()
light_data = [min(row.light_reading, 120) for row in log_entries]
timestamps = [datetime.strptime(row.timestamp, format_string) for row in log_entries]
g.fit(light_data)
predictions = predict(g, light_data)
light_dict = {}
inside = bin_by_hour(timestamps, predictions, [0,1])
outside = bin_by_hour(timestamps, predictions, [2,3])
pprint(inside)
pprint(outside)
def plot_light_data(timestamps, predictions):
fig, ax = plt.subplots()
ax.plot_date(timestamps, predictions, 'b')
ax.xaxis.set_minor_locator(HourLocator(arange(0,25,6)))
ax.xaxis.set_minor_formatter(DateFormatter('%H'))
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%a'))
ax.fmt_xdata = DateFormatter('%H:%M:%S')
fig.autofmt_xdate()
plt.show()
def bin_by_hour(timestamps, predictions, clusters):
filtered = [timestamps[index] for index, entry in enumerate(timestamps) if predictions[index] in clusters]
buckets = {hour: 0 for hour in range(24)}
for time in filtered:
hour = time.hour
buckets[hour] = buckets.get(hour, 0) + 1
return buckets
def predict(gmm, data):
results = gmm.predict(data)
smoothed = smooth_results(results)
converter = make_converter(gmm, smoothed)
return [converter[value] for value in smoothed]
def load_light():
light_data = LogEntry.select()
return sorted(light_data, key=lambda row: datetime.strptime(row.timestamp, format_string))
def smooth_results(data):
new_data = []
for index in range(len(data)):
new_data.append(get_most_common(data, index))
return new_data
def make_converter(gmm, data):
converter = {}
means = [[index, value[0]] for index, value in enumerate(gmm.means_)]
for index, mean in enumerate(sorted(means, key=lambda means: means[1])):
converter[mean[0]] = index
return converter
def get_most_common(data, index):
window_size = 100
start = max(index - window_size, 0)
end = min(index + window_size, len(data))
buckets = {}
for value in data[start:end]:
buckets[value] = buckets.get(value, 0) + 1
return max(buckets.iterkeys(), key=(lambda key: buckets[key]))
if __name__ == '__main__':
main() | mit |
blaisb/cfdemUtilities | independentTests/dragSphere.py | 2 | 1890 | # This program is a simple ODE solver for the case of the drag around a single sphere
# This can be used to predict the stability of the CFDEM coupling time step and to play around with the concepts
# Time integration is Euler scheme and Euler form for the drag is assumed
# TODO
# Verlet integration should be added to see if this changes or not something
# Author : Bruno Blais
# Python imports
import math
import numpy
import matplotlib.pyplot as plt
# Simulation parameters to input manually
#-----------------------------------------
uf=1e-5
mu = 1e-5
rhof = 1 #fluid density
rhop = 2400 #particle density
dp = 0.01
tf = 15.0000 #final time of the test / stop the ODE
ratio=1.3
drag="Rong" # or constant
g = 9.81
#------------------------------------------
#Initial velocity of the particle
up=0
dt =ratio * rhop * dp**2 / mu /18.
dt= ratio * 1./(3./4. * rhof/rhop * 1./dp * abs(uf)*0.44)
dt=0.0001
n=tf/dt
#Calculate velocity evolution of particle
t = numpy.arange(0,tf+dt,dt)
#Mass of particle
m = 4 * numpy.pi / 3 * dp**3 / 8 * rhop
u=numpy.zeros([len(t)]) + up
x=numpy.zeros([len(t)])
ReMax=0
#Begin ODE scheme
for i in range(0,len(t)-1):
ur = uf-u[i]
Rep = numpy.abs(rhof * ur * dp/mu)
ReMax=max(Rep,ReMax)
if (drag=="Stokes"):
Cd = 24 / Rep
elif (drag=="Rong"):
Cd = (0.63 + 2.4/(Rep)**(1./2.))**2
elif (drag=="Newton"):
Cd=0.44
Fd = 0.125 * rhof * numpy.pi * dp**2 * numpy.abs(ur)*ur * Cd
u[i+1] = u[i] + dt * Fd/m - dt * g
# Numerical stability criteria for comparison with measured stability
print "Stability criteria is : ", rhop * dp**2 / mu /18.
print "Stability criteria ratio is: ", dt/ (rhop * dp**2 / mu /18.)
print "Maximal Reynolds reached is: ", ReMax
#Plot evolution of velocity in time
plt.figure()
plt.plot(t,u)
plt.show()
#Plot evolution of velocity in space
plt.figure()
plt.plot(x,u)
plt.show()
| lgpl-3.0 |
TAMU-CLASS/barnfire | src/materials_bondarenko.py | 1 | 17178 | '''
Andrew Till
Summer 2014
Bondarenko iteration utility for materials
'''
#STDLIB
import os
import shutil
#TPL
import numpy as np
#MINE
from materials_util import is_fissionable
import materials_util as util
from directories import get_common_directories
import Readgroupr as readgroupr
import PDTXS as pdtxs
def perform_bondarenko_iterations(inputDict, materials, verbosity):
'''Driver for Bondarenko iterations'''
maxIterations = inputDict['numberiterationsmax']
maxError = inputDict['errormax']
useSimpleRxn = inputDict['simplereactions']
scatMatrixFormat = inputDict['format']
rxnsToPrint = inputDict['printopt']
energyMeshPath = inputDict['mesh']
fluxBasePath = inputDict['fluxes']
#
dirDict = get_common_directories()
rootDirr = dirDict['gendf']
outDirr = dirDict['pdtxs']
#
energyMesh = None
if energyMeshPath is not None:
energyMesh = np.loadtxt(energyMeshPath, dtype=np.int, skiprows=2, usecols=[0])[:-1]
numElements = len(np.unique(energyMesh))
energyMeshFilenameOut = 'mesh_{0}.txt'.format(numElements)
energyMeshPathOut = os.path.join(outDirr, energyMeshFilenameOut)
shutil.copy2(energyMeshPath, energyMeshPathOut)
#
if verbosity:
print '------- Bondarenko -------'
fluxDict = read_fluxes(fluxBasePath, materials)
for material in materials:
backgroundXSDict = iterate_one_material(rootDirr, material, maxError, maxIterations, energyMesh, fluxDict, verbosity)
if maxIterations < 0:
unset_background_xs_dict(material, backgroundXSDict, verbosity)
print_one_material(rootDirr, outDirr, material, backgroundXSDict, scatMatrixFormat, useSimpleRxn, rxnsToPrint, energyMesh, fluxDict, verbosity)
if verbosity:
key = backgroundXSDict.keys()[0]
numGroups = len(backgroundXSDict[key])
print 'Number of groups is', numGroups
def read_fluxes(fluxBasePath, materials):
'''Read in flux files'''
fluxDict = {}
if fluxBasePath is None:
return None
for material in materials:
shortName = material.shortName
fluxPath = fluxBasePath.format(m=shortName)
fluxDict[shortName] = np.loadtxt(fluxPath, skiprows=1, usecols=[1])
return fluxDict
def print_one_material(rootDirr, outDirr, material, backgroundXSDict, scatMatrixFormat, useSimpleRxn, rxnsToPrint, energyMesh, fluxDict, verbosity):
'''Print PDT XS for one material. Prints both the component-wise and combined material xs's. Requires unique shortName for each material to prevent over-writing.'''
txs2mtDict = readgroupr.get_short2mt_dict(readgroupr.get_endf_mt_list())
T = material.temperature
ZAList = sorted(material.ZAList)
for (Z,A) in ZAList:
sig0Vec = backgroundXSDict[(Z,A)]
numGroups = len(sig0Vec)
Sab = material.SabDict[(Z,A)]
sym = material.symDict[Z]
shortName = material.shortName
# Metastable isomeric states use the groundstate A + 400
effA = A % 400
metastableStr = ''
if A // 400 > 0:
metastableStr = 'm'
leafDirr = util.get_nuclide_dirr(sym, effA, Sab, metastableStr)
inDirr = os.path.join(rootDirr, leafDirr)
readerOpt = 'gendf'
outName = 'xs_{0}_{1}-{2}_{3}.data'.format(shortName, sym.lower(), A, numGroups)
pickleName = None
thermalMTList = ['{0}'.format(txs2mtDict[txs]) for txs in material.thermalXSDict[(Z,A)]]
thermalMTStr = ' '.join(thermalMTList)
parser = readgroupr.define_input_parser()
parseStr = '-i {i} -o {o} -O {O} -P {P} -w {w} -p {p} -t {t} -T {T} -f {f}'.format(
i=inDirr, o=outDirr, O=outName, P=pickleName, p=rxnsToPrint, w=readerOpt, t=thermalMTStr, T=T, f=scatMatrixFormat)
if useSimpleRxn:
parseStr += ' -m 1 2 18 102 221 452 -M 2 18 221 -t 221'
if verbosity > 2:
print 'Calling ./Readgroupr', parseStr
if verbosity:
print 'Printing XS to {0}'.format(os.path.join(outDirr, outName))
readerDict = vars(parser.parse_args(parseStr.split()))
if fluxDict is not None:
readerDict['flux'] = fluxDict[material.shortName]
readerDict['energyMesh'] = energyMesh
readerDict['sig0Vec'] = sig0Vec
readgroupr.finish_parsing(readerDict)
readgroupr.execute_reader(readerDict)
if verbosity > 2:
plot_bondarenko(rootDirr, backgroundXSDict)
form_and_print_macroscopic_xs(outDirr, ZAList, material, numGroups, verbosity)
def form_and_print_macroscopic_xs(dirr, ZAList, material, numGroups, verbosity=False):
'''Combine all microscopic component XS into one macroscopic material XS'''
shortName = material.shortName
MTinvel = 259
MTfission = 18
MTnutot = 452
MTnudelay = 455
MTnuprompt = 456
MTdecay = 457
MTfissEnergy = 458
MTwgt = 1099
MTchi = 1018
MTnuSigF = 1452
MTdecayConst = 1054
MTdelayedChi = 2055
MTssNu = 2452
MTssChi = 2018
MTfissionMatrix = 2518
# Read in component cross sections
xsDictIn = {}
for (Z,A) in ZAList:
sym = material.symDict[Z]
inName = 'xs_{0}_{1}-{2}_{3}.data'.format(shortName, sym.lower(), A, numGroups)
inPath = os.path.join(dirr, inName)
xsDictIn[(Z,A)] = pdtxs.read_PDT_xs_generally(inPath)
# Initialize material cross section dictionary (xsOut)
key = (Z,A)
t = xsDictIn[key]
numDNGs = 0
# Find the number of delayed neutron groups (numDNGs) from the fissile components
for (Z,A) in xsDictIn:
t = xsDictIn[(Z,A)]
if numDNGs == 0:
numDNGs = t.D
elif numDNGs != t.D and t.D != 0:
assert (numDNGs == t.D), 'Fissile material ({0}-{1}) has different number of delayed neutron groups'.format(Z,A)
t.D = numDNGs
microStr = 'Macroscopic cross sections are in units of cm^-1.'
xsDict = pdtxs.PDT_XS(t.G, t.M, t.D, t.T, t.typeStr, microStr, t.Eg, t.dE, {})
xsOut = xsDict.xs
# Keep a reaction if it appears in at least one component
MTs = set()
for (Z,A) in xsDictIn:
MTs.update(xsDictIn[(Z,A)].xs.keys())
# A material-average decay rate does not make sense, so do not compute one
if MTdecay in MTs:
MTs.remove(MTdecay)
# Initialize 0D XS
MTs0D = [MT for MT in MTs if MT in [MTdecay, MTfissEnergy]]
for MT in MTs0D:
xsOut[MT] = 0.
# Initialize 1D XS
MTs1D = [MT for MT in MTs if (MT < 2500 and MT not in MTs0D + [MTdecayConst, MTdelayedChi])]
for MT in MTs1D:
xsOut[MT] = np.zeros(xsDict.G)
# Initialize delayed neutron precurse decay constant (MT 1054)
if MTdecayConst in MTs:
xsOut[MTdecayConst] = np.zeros(xsDict.D)
# Initialize delayed neutron spectra (MT 2055)
if MTdelayedChi in MTs:
xsOut[MTdelayedChi] = np.zeros((xsDict.D, xsDict.G))
# Initialize transfer matrices
MTsXfer = [MT for MT in MTs if MT >= 2500]
for MT in MTsXfer:
if MT == MTfissionMatrix:
xsOut[MT] = np.zeros((xsDict.G, xsDict.G))
else:
xsOut[MT] = np.zeros((xsDict.M, xsDict.G, xsDict.G))
# Save denominators for XS that are averages instead of density-weighted sums
MTsAvg = [MT for MT in MTs if MT in [MTwgt, MTnutot, MTnudelay, MTnuprompt, \
MTchi, MTdelayedChi, MTdecayConst, MTfissEnergy, MTinvel, MTssNu, MTssChi]]
norms = {}
for MT in MTsAvg:
norms[MT] = 0.
# Compute XS averages and sums by adding in the contribution of each component
for (Z,A) in ZAList:
xsIn = xsDictIn[(Z,A)].xs
compDensity = material.atomDensity * material.elemAtomFracDict[Z] * material.abundanceDict[(Z,A)]
# Compute flux weight and its sum for this component
wgt = 0.
wgtSum = 1.
if MTwgt in xsIn:
wgt = xsIn[MTwgt]
wgtSum = np.sum(wgt)
if not wgtSum:
wgtSum = 1.
xsOut[MTwgt] += compDensity * wgt / wgtSum
norms[MTwgt] += compDensity
# Compute fission rate sum for this component
fissRate = 0.
if MTnutot in xsIn:
fissRate = np.sum(xsIn[MTnutot] * xsIn[MTfission] * wgt) / wgtSum
fissRatePrompt = 0.
if MTnuSigF in xsIn:
fissRatePrompt = np.sum(xsIn[MTnuSigF] * wgt) / wgtSum
fissRateDelayed = 0.
if MTnudelay in xsIn:
fissRateDelayed = np.sum(xsIn[MTnudelay] * xsIn[MTfission] * wgt) / wgtSum
# Update numerator and denominator for energy per fission using fission-source weighting
if MTfissEnergy in xsIn:
xsOut[MTfissEnergy] += compDensity * fissRate * xsIn[MTfissEnergy]
norms[MTfissEnergy] += compDensity * fissRate
# Update numerator and denominator for chi using fission-source weighting
if MTchi in xsIn:
xsOut[MTchi] += compDensity * fissRatePrompt * xsIn[MTchi]
norms[MTchi] += compDensity * fissRatePrompt
# Update numerator and denominator for delayed chi using fission-source weighting
if MTdelayedChi in xsIn:
xsOut[MTdelayedChi] += compDensity * fissRateDelayed * xsIn[MTdelayedChi]
norms[MTdelayedChi] += compDensity * fissRateDelayed
# Delayed neutron decay constant should be consistent for all nuclides
if MTdecayConst in xsIn:
xsOut[MTdecayConst] += 1.0 * xsIn[MTdecayConst]
norms[MTdecayConst] += 1.0
# Update numerator and denominator for steady-state chi using fission-source weighting
if MTssChi in xsIn:
xsOut[MTssChi] += compDensity * fissRate * xsIn[MTssChi]
norms[MTssChi] += compDensity * fissRate
# Update neutrons per fission (nutot, nuprompt, nudelay, and nu_ss)
if MTnutot in xsIn:
xsOut[MTnutot] += compDensity * xsIn[MTfission] * xsIn[MTnutot]
norms[MTnutot] += compDensity * xsIn[MTfission]
if MTnudelay in xsIn:
xsOut[MTnudelay] += compDensity * xsIn[MTfission] * xsIn[MTnudelay]
norms[MTnudelay] += compDensity * xsIn[MTfission]
if MTnuprompt in xsIn:
xsOut[MTnuprompt] += compDensity * xsIn[MTfission] * xsIn[MTnuprompt]
norms[MTnuprompt] += compDensity * xsIn[MTfission]
if MTssNu in xsIn:
xsOut[MTssNu] += compDensity * xsIn[MTfission] * xsIn[MTssNu]
norms[MTssNu] += compDensity * xsIn[MTfission]
# Update numerator and denominator for inverse velocity using density weighting
if MTinvel in xsIn:
xsOut[MTinvel] += compDensity * xsIn[MTinvel]
norms[MTinvel] += compDensity
# Compute cross sections that are density-weighted sums
MTsSum = [MT for MT in MTs if MT not in MTsAvg]
for MT in MTsSum:
if MT in xsIn:
xsOut[MT] += compDensity * xsIn[MT]
# Normalize XS averages
for MT in MTsAvg:
if np.all(norms[MT]>0.0):
xsOut[MT] /= norms[MT]
# Recompute steady-state nu and chi
if all(mts in MTs for mts in [MTnuSigF, MTfission, MTssNu, MTssChi]):
flux = xsOut[MTwgt]
promptProd = xsOut[MTnuSigF]
fission_xs = xsOut[MTfission]
nu_delayed = xsOut.get(MTnudelay, 0.)
chis_delayed = xsOut.get(MTdelayedChi, 1.)
chi_delayed = np.sum(chis_delayed, axis=0)
fission_x_prompt = xsOut[MTfissionMatrix]
nu_prompt = promptProd/fission_xs
nu_ss = (nu_prompt + nu_delayed) * fission_xs
n_per_gout = ( np.dot(fission_x_prompt, flux) + \
chi_delayed*np.sum(nu_delayed*fission_xs*flux) )
chi_ss = n_per_gout/np.sum(n_per_gout)
xsOut[MTssNu] = nu_ss
xsOut[MTssChi] = chi_ss
# Print out material XS
outName = 'xs_{0}_{1}.data'.format(shortName, numGroups)
outPath = os.path.join(dirr, outName)
if verbosity:
print 'Printing combined XS to {0}'.format(outPath)
pdtxs.write_PDT_xs_generally(outPath, xsDict)
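# A minimal sketch of the fission-source weighting used above: the material
# chi is the component spectra weighted by each component's fission neutron
# production (number density times its nu*sigma_f reaction rate). All numbers
# below are invented purely for illustration.
def _example_fission_source_weighted_chi():
    densities = np.array([1.0e-2, 5.0e-3])      # atoms/(b*cm), hypothetical
    fissRates = np.array([3.0e-1, 1.0e-1])      # per-component nu*sigma_f*flux sums
    chis = np.array([[0.6, 0.4], [0.7, 0.3]])   # per-component spectra, 2 groups
    weights = densities * fissRates
    return np.dot(weights, chis) / weights.sum()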
def iterate_one_material(rootDirr, material, maxError, maxIterations, energyMesh=None, fluxDict=None, verbosity=False):
'''Perform Bondarenko iteration on one material. Fine groups within an energy element share the same background cross section.'''
sig0Vec = None
if verbosity:
print 'Performing Bondarenko iteration for material {0}'.format(material.longName)
ZAList = sorted(material.ZAList)
readerOpt = 'gendf'
totalXSDict = {}
backgroundXSDict = {}
iterationCount = 0
globalError = 1.0
for (Z,A) in ZAList:
read_one_total_xs(rootDirr, Z, A, material, totalXSDict, readerOpt, sig0Vec, energyMesh, fluxDict, verbosity)
build_all_background_xs(material, totalXSDict, backgroundXSDict, verbosity)
print_bondarenko(iterationCount, maxIterations, globalError, maxError, verbosity)
readerOpt = 'pickle'
while globalError > maxError and iterationCount < maxIterations:
globalError = 0.0
for (Z,A) in ZAList:
sig0Vec = backgroundXSDict[(Z,A)]
localError = read_one_total_xs(rootDirr, Z, A, material, totalXSDict, readerOpt, sig0Vec, energyMesh, fluxDict, verbosity)
globalError = max(localError, globalError)
build_all_background_xs(material, totalXSDict, backgroundXSDict, verbosity)
iterationCount += 1
print_bondarenko(iterationCount, maxIterations, globalError, maxError, verbosity)
return backgroundXSDict
def unset_background_xs_dict(material, backgroundXSDict, verbosity):
for key in backgroundXSDict:
size = len(backgroundXSDict[key])
sig0 = material.backgroundXSDict[key]
if sig0 == np.inf:
sig0 = 1e10
backgroundXSDict[key] = sig0 * np.ones(size)
def read_one_total_xs(rootDirr, Z, A, material, totalXSDict, readerOpt='gendf', sig0Vec=None, energyMesh=None, fluxDict=None, verbosity=False):
'''Read the total XS for one nuclide for one material'''
T = material.temperature
Sab = material.SabDict[(Z,A)]
sym = material.symDict[Z]
sig0 = material.backgroundXSDict[(Z,A)]
if sig0 == np.inf:
sig0 = 1e10
# Metastable isomeric states use the groundstate A + 400
effA = A % 400
metastableStr = ''
if A // 400 > 0:
metastableStr = 'm'
#
leafDirr = util.get_nuclide_dirr(sym, effA, Sab, metastableStr)
fullDirr = os.path.join(rootDirr, leafDirr)
parser = readgroupr.define_input_parser()
parseStr = '-i {i} -o {o} -w {w} -p none -m 1 -M -t -T {T} -Z {Z}'.format(
i=fullDirr, o=fullDirr, w=readerOpt, T=T, Z=sig0)
if verbosity > 2:
print 'Calling ./Readgroupr', parseStr
readerDict = vars(parser.parse_args(parseStr.split()))
if fluxDict is not None:
readerDict['flux'] = fluxDict[material.shortName]
readerDict['energyMesh'] = energyMesh
readerDict['sig0Vec'] = sig0Vec
readgroupr.finish_parsing(readerDict)
xsDict = readgroupr.execute_reader(readerDict)
totalXS = xsDict['tot']
epsilon = 1E-11
if (Z,A) in totalXSDict:
err = np.linalg.norm((totalXSDict[(Z,A)] - totalXS) / (totalXS + epsilon), np.inf) / np.sqrt(len(totalXS))
if verbosity > 1:
print '>>> Error for ({0}, {1}) is {2}'.format(Z, A, err)
if verbosity > 3:
print (totalXSDict[(Z,A)] - totalXS) / (totalXS + epsilon)
else:
err = 1.0
totalXSDict[(Z,A)] = totalXS
return err
def build_all_background_xs(material, totalXSDict, backgroundXSDict, verbosity=False):
'''Build the background XS for each nuclide in one material.'''
ZAList = sorted(material.ZAList)
numGroups = len(totalXSDict[ZAList[0]])
numNuclides = len(totalXSDict)
backgroundXSs = np.zeros((numNuclides, numGroups))
# Build the background XS
for i, (Z,A) in enumerate(ZAList):
mask = np.ones(numNuclides, dtype=np.bool)
mask[i] = False
atomDensity = material.atomDensity * material.elemAtomFracDict[Z] * material.abundanceDict[(Z,A)]
# Uses broadcasting
backgroundXSs[mask, :] += atomDensity * totalXSDict[(Z,A)]
backgroundXSs += material.chordLength
for i, (Z,A) in enumerate(ZAList):
atomDensity = material.atomDensity * material.elemAtomFracDict[Z] * material.abundanceDict[(Z,A)]
backgroundXSs[i, :] /= atomDensity
# Uses aliasing
backgroundXSDict[(Z,A)] = backgroundXSs[i, :]
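# A toy two-nuclide check of the Bondarenko background definition implemented
# above: sigma_0 for nuclide i is the total macroscopic cross section of all
# *other* nuclides plus the escape (chord) term, divided by nuclide i's own
# number density. All numbers are invented for illustration.
def _example_background_xs_two_nuclides():
    densities = np.array([2.0e-2, 1.0e-3])   # atoms/(b*cm)
    totalXS = np.array([10.0, 300.0])        # barns, one energy group
    chord = 0.1                              # escape term (1/cm)
    sig0 = np.empty(2)
    for i in range(2):
        others = np.arange(2) != i
        sig0[i] = (np.sum(densities[others] * totalXS[others]) + chord) / densities[i]
    return sig0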
def print_bondarenko(iterationCount, maxIterations, error, maxError, verbosity):
if verbosity:
print 'Iteration {0:2g} (max {1})'.format(iterationCount, maxIterations),
print 'Error {0:.2e} (max {1})'.format(error, maxError)
def plot_bondarenko(rootDirr, backgroundXSDict):
for (Z,A) in backgroundXSDict.keys():
from matplotlib import pyplot as plt
plt.clf()
y = backgroundXSDict[(Z,A)]
x = np.arange(len(y))
plt.semilogy(x,y)
plt.xlabel('Group index')
plt.ylabel('Background xs (b)')
nom = 'sig0_{0}_{1}.pdf'.format(Z,A)
path = os.path.join(rootDirr, nom)
plt.savefig(path)
| mit |